pax_global_header00006660000000000000000000000064122415153070014512gustar00rootroot0000000000000052 comment=14d20ec9b386aaf657cdf394865edbfbbe3fa850 ntdb-1.0/000077500000000000000000000000001224151530700123015ustar00rootroot00000000000000ntdb-1.0/ABI/000077500000000000000000000000001224151530700126745ustar00rootroot00000000000000ntdb-1.0/ABI/ntdb-0.9.sigs000066400000000000000000000050151224151530700150170ustar00rootroot00000000000000ntdb_add_flag: void (struct ntdb_context *, unsigned int) ntdb_append: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA, NTDB_DATA) ntdb_chainlock: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA) ntdb_chainlock_read: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA) ntdb_chainunlock: void (struct ntdb_context *, NTDB_DATA) ntdb_chainunlock_read: void (struct ntdb_context *, NTDB_DATA) ntdb_check_: enum NTDB_ERROR (struct ntdb_context *, enum NTDB_ERROR (*)(NTDB_DATA, NTDB_DATA, void *), void *) ntdb_close: int (struct ntdb_context *) ntdb_delete: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA) ntdb_errorstr: const char *(enum NTDB_ERROR) ntdb_exists: bool (struct ntdb_context *, NTDB_DATA) ntdb_fd: int (const struct ntdb_context *) ntdb_fetch: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA, NTDB_DATA *) ntdb_firstkey: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA *) ntdb_foreach_: void (int (*)(struct ntdb_context *, void *), void *) ntdb_get_attribute: enum NTDB_ERROR (struct ntdb_context *, union ntdb_attribute *) ntdb_get_flags: unsigned int (struct ntdb_context *) ntdb_get_seqnum: int64_t (struct ntdb_context *) ntdb_lockall: enum NTDB_ERROR (struct ntdb_context *) ntdb_lockall_read: enum NTDB_ERROR (struct ntdb_context *) ntdb_name: const char *(const struct ntdb_context *) ntdb_nextkey: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA *) ntdb_open: struct ntdb_context *(const char *, int, int, mode_t, union ntdb_attribute *) ntdb_parse_record_: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA, enum NTDB_ERROR 
(*)(NTDB_DATA, NTDB_DATA, void *), void *) ntdb_remove_flag: void (struct ntdb_context *, unsigned int) ntdb_repack: enum NTDB_ERROR (struct ntdb_context *) ntdb_set_attribute: enum NTDB_ERROR (struct ntdb_context *, const union ntdb_attribute *) ntdb_store: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA, NTDB_DATA, int) ntdb_summary: enum NTDB_ERROR (struct ntdb_context *, enum ntdb_summary_flags, char **) ntdb_transaction_cancel: void (struct ntdb_context *) ntdb_transaction_commit: enum NTDB_ERROR (struct ntdb_context *) ntdb_transaction_prepare_commit: enum NTDB_ERROR (struct ntdb_context *) ntdb_transaction_start: enum NTDB_ERROR (struct ntdb_context *) ntdb_traverse_: int64_t (struct ntdb_context *, int (*)(struct ntdb_context *, NTDB_DATA, NTDB_DATA, void *), void *) ntdb_unlockall: void (struct ntdb_context *) ntdb_unlockall_read: void (struct ntdb_context *) ntdb_unset_attribute: void (struct ntdb_context *, enum ntdb_attribute_type) ntdb_wipe_all: enum NTDB_ERROR (struct ntdb_context *) ntdb-1.0/LICENSE000066400000000000000000000167251224151530700133210ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. 
Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. 
You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. 
If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. ntdb-1.0/Makefile000066400000000000000000000015631224151530700137460ustar00rootroot00000000000000# simple makefile wrapper to run waf WAF=WAF_MAKE=1 PATH=buildtools/bin:../../buildtools/bin:$$PATH waf all: $(WAF) build install: $(WAF) install uninstall: $(WAF) uninstall test: FORCE $(WAF) test $(TEST_OPTIONS) testenv: $(WAF) test --testenv $(TEST_OPTIONS) quicktest: $(WAF) test --quick $(TEST_OPTIONS) dist: touch .tmplock WAFLOCK=.tmplock $(WAF) dist distcheck: touch .tmplock WAFLOCK=.tmplock $(WAF) distcheck clean: $(WAF) clean distclean: $(WAF) distclean reconfigure: configure $(WAF) reconfigure show_waf_options: $(WAF) --help # some compatibility make targets everything: all testsuite: all .PHONY: check check: test torture: all # this should do an install as well, once install is finished installcheck: test etags: $(WAF) etags ctags: $(WAF) ctags pydoctor: $(WAF) pydoctor bin/%:: FORCE $(WAF) --targets=`basename $@` FORCE: ntdb-1.0/buildtools/000077500000000000000000000000001224151530700144615ustar00rootroot00000000000000ntdb-1.0/buildtools/README000066400000000000000000000005621224151530700153440ustar00rootroot00000000000000See http://code.google.com/p/waf/ 
for more information on waf You can get a svn copy of the upstream source with: svn checkout http://waf.googlecode.com/svn/trunk/ waf-read-only Samba currently uses waf 1.5, which can be found at: http://waf.googlecode.com/svn/branches/waf-1.5 To update the current copy of waf, use the update-waf.sh script in this directory. ntdb-1.0/buildtools/bin/000077500000000000000000000000001224151530700152315ustar00rootroot00000000000000ntdb-1.0/buildtools/bin/waf000077500000000000000000000043211224151530700157340ustar00rootroot00000000000000#!/usr/bin/env python # encoding: ISO-8859-1 # Thomas Nagy, 2005-2010 """ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" import os, sys if sys.hexversion<0x203000f: raise ImportError("Waf requires Python >= 2.3") if 'PSYCOWAF' in os.environ: try:import psyco;psyco.full() except:pass VERSION="1.5.19" REVISION="x" INSTALL="x" C1='x' C2='x' cwd = os.getcwd() join = os.path.join WAF='waf' def b(x): return x if sys.hexversion>0x300000f: WAF='waf3' def b(x): return x.encode() def err(m): print(('\033[91mError: %s\033[0m' % m)) sys.exit(1) def test(dir): try: os.stat(join(dir, 'wafadmin')); return os.path.abspath(dir) except OSError: pass def find_lib(): return os.path.abspath(os.path.dirname(os.path.dirname(__file__))) wafdir = find_lib() w = join(wafdir, 'wafadmin') t = join(w, 'Tools') f = join(w, '3rdparty') sys.path = [w, t, f] + sys.path if __name__ == '__main__': import Scripting Scripting.prepare(t, cwd, VERSION, wafdir) ntdb-1.0/buildtools/compare_config_h4.sh000077500000000000000000000005021224151530700203630ustar00rootroot00000000000000#!/bin/sh # compare the generated config.h from a waf build with existing samba # build grep "^.define" bin/default/source4/include/config.h | sort > waf-config.h grep "^.define" $HOME/samba_old/source4/include/config.h | sort > old-config.h comm -23 old-config.h waf-config.h #echo #diff -u old-config.h waf-config.h ntdb-1.0/buildtools/compare_generated.sh000077500000000000000000000025121224151530700204640ustar00rootroot00000000000000#!/bin/sh # compare the generated files from a waf old_build=$HOME/samba_old gen_files=$(cd bin/default && find . 
-type f -name '*.[ch]') 2>&1 strip_file() { in_file=$1 out_file=$2 cat $in_file | grep -v 'The following definitions come from' | grep -v 'Automatically generated at' | grep -v 'Generated from' | sed 's|/home/tnagy/samba/source4||g' | sed 's|/home/tnagy/samba/|../|g' | sed 's|bin/default/source4/||g' | sed 's|bin/default/|../|g' | sed 's/define _____/define ___/g' | sed 's/define __*/define _/g' | sed 's/define _DEFAULT_/define _/g' | sed 's/define _SOURCE4_/define ___/g' | sed 's/define ___/define _/g' | sed 's/ifndef ___/ifndef _/g' | sed 's|endif /* ____|endif /* __|g' | sed s/__DEFAULT_SOURCE4/__/ | sed s/__DEFAULT_SOURCE4/__/ | sed s/__DEFAULT/____/ > $out_file } compare_file() { f=$f bname=$(basename $f) t1=/tmp/$bname.old.$$ t2=/tmp/$bname.new.$$ strip_file $old_build/$f $t1 strip_file bin/default/$f $t2 diff -u -b $t1 $t2 2>&1 rm -f $t1 $t2 } for f in $gen_files; do compare_file $f done ntdb-1.0/buildtools/compare_install.sh000077500000000000000000000002121224151530700201670ustar00rootroot00000000000000#!/bin/sh prefix1="$1" prefix2="$2" (cd $prefix1 && find . ) | sort > p1.txt (cd $prefix2 && find . 
) | sort > p2.txt diff -u p[12].txt ntdb-1.0/buildtools/scripts/000077500000000000000000000000001224151530700161505ustar00rootroot00000000000000ntdb-1.0/buildtools/scripts/Makefile.waf000066400000000000000000000017671224151530700203770ustar00rootroot00000000000000# simple makefile wrapper to run waf WAF_BINARY=BUILDTOOLS/bin/waf WAF=WAF_MAKE=1 $(WAF_BINARY) all: $(WAF) build install: $(WAF) install uninstall: $(WAF) uninstall test: $(WAF) test $(TEST_OPTIONS) help: @echo NOTE: to run extended waf options use $(WAF_BINARY) or modify your PATH $(WAF) --help testenv: $(WAF) test --testenv $(TEST_OPTIONS) quicktest: $(WAF) test --quick $(TEST_OPTIONS) dist: $(WAF) dist distcheck: $(WAF) distcheck clean: $(WAF) clean distclean: $(WAF) distclean reconfigure: configure $(WAF) reconfigure show_waf_options: $(WAF) --help # some compatibility make targets everything: all testsuite: all check: test torture: all # this should do an install as well, once install is finished installcheck: test etags: $(WAF) etags ctags: $(WAF) ctags bin/%:: FORCE $(WAF) --targets=$@ FORCE: configure: autogen-waf.sh BUILDTOOLS/scripts/configure.waf ./autogen-waf.sh Makefile: autogen-waf.sh configure BUILDTOOLS/scripts/Makefile.waf ./autogen-waf.sh ntdb-1.0/buildtools/scripts/abi_gen.sh000077500000000000000000000007531224151530700201000ustar00rootroot00000000000000#!/bin/sh # generate a set of ABI signatures from a shared library SHAREDLIB="$1" GDBSCRIPT="gdb_syms.$$" ( cat < $GDBSCRIPT # forcing the terminal avoids a problem on Fedora12 TERM=none gdb -batch -x $GDBSCRIPT "$SHAREDLIB" < /dev/null rm -f $GDBSCRIPT ntdb-1.0/buildtools/scripts/autogen-waf.sh000077500000000000000000000013521224151530700207250ustar00rootroot00000000000000#!/bin/sh p=`dirname $0` echo "Setting up for waf build" echo "Looking for the buildtools directory" d="buildtools" while test \! 
-d "$p/$d"; do d="../$d"; done echo "Found buildtools in $p/$d" echo "Setting up configure" rm -f $p/configure $p/include/config*.h* sed "s|BUILDTOOLS|$d|g;s|BUILDPATH|$p|g" < "$p/$d/scripts/configure.waf" > $p/configure chmod +x $p/configure echo "Setting up Makefile" rm -f $p/makefile $p/Makefile sed "s|BUILDTOOLS|$d|g" < "$p/$d/scripts/Makefile.waf" > $p/Makefile echo "done. Now run $p/configure or $p/configure.developer then make." if [ $p != "." ]; then echo "Notice: The build invoke path is not 'source4'! Use make with the parameter" echo "-C <'source4' path>. Example: make -C source4 all" fi ntdb-1.0/buildtools/scripts/configure.waf000077500000000000000000000003711224151530700206340ustar00rootroot00000000000000#!/bin/sh PREVPATH=`dirname $0` WAF=BUILDTOOLS/bin/waf # using JOBS=1 gives maximum compatibility with # systems like AIX which have broken threading in python JOBS=1 export JOBS cd BUILDPATH || exit 1 $WAF configure "$@" || exit 1 cd $PREVPATH ntdb-1.0/buildtools/testwaf.sh000077500000000000000000000026001224151530700164730ustar00rootroot00000000000000#!/bin/bash set -e set -x d=$(dirname $0) cd $d/.. 
PREFIX=$HOME/testprefix if [ $# -gt 0 ]; then tests="$*" else tests="lib/replace lib/talloc lib/tevent lib/tdb lib/ldb" fi echo "testing in dirs $tests" for d in $tests; do echo "`date`: testing $d" pushd $d rm -rf bin type waf waf dist ./configure -C --enable-developer --prefix=$PREFIX time make make install make distcheck case $d in "lib/ldb") ldd bin/ldbadd ;; "lib/replace") ldd bin/replace_testsuite ;; "lib/talloc") ldd bin/talloc_testsuite ;; "lib/tdb") ldd bin/tdbtool ;; esac popd done echo "testing python portability" pushd lib/talloc versions="python2.4 python2.5 python2.6 python3.0 python3.1" for p in $versions; do ret=$(which $p || echo "failed") if [ $ret = "failed" ]; then echo "$p not found, skipping" continue fi echo "Testing $p" $p ../../buildtools/bin/waf configure -C --enable-developer --prefix=$PREFIX $p ../../buildtools/bin/waf build install done popd echo "testing cross compiling" pushd lib/talloc ret=$(which arm-linux-gnueabi-gcc || echo "failed") if [ $ret != "failed" ]; then CC=arm-linux-gnueabi-gcc ./configure -C --prefix=$PREFIX --cross-compile --cross-execute='runarm' make && make install else echo "Cross-compiler not installed, skipping test" fi popd ntdb-1.0/buildtools/update-waf.sh000077500000000000000000000004171224151530700170570ustar00rootroot00000000000000#!/bin/sh # Update our copy of waf TARGETDIR="`dirname $0`" WORKDIR="`mktemp -d -t update-waf-XXXXXX`" mkdir -p "$WORKDIR" git clone https://code.google.com/p/waf.waf15/ "$WORKDIR" rsync -C -avz --delete "$WORKDIR/wafadmin/" "$TARGETDIR/wafadmin/" rm -rf "$WORKDIR" ntdb-1.0/buildtools/wafadmin/000077500000000000000000000000001224151530700162475ustar00rootroot00000000000000ntdb-1.0/buildtools/wafadmin/3rdparty/000077500000000000000000000000001224151530700200175ustar00rootroot00000000000000ntdb-1.0/buildtools/wafadmin/3rdparty/ParallelDebug.py000066400000000000000000000205321224151530700230760ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2007-2010 (ita) """ debugging helpers for parallel compilation, outputs a svg file in the build directory """ import os, time, sys, threading try: from Queue import Queue except: from queue import Queue import Runner, Options, Utils, Task, Logs from Constants import * #import random #random.seed(100) def set_options(opt): opt.add_option('--dtitle', action='store', default='Parallel build representation for %r' % ' '.join(sys.argv), help='title for the svg diagram', dest='dtitle') opt.add_option('--dwidth', action='store', type='int', help='diagram width', default=1000, dest='dwidth') opt.add_option('--dtime', action='store', type='float', help='recording interval in seconds', default=0.009, dest='dtime') opt.add_option('--dband', action='store', type='int', help='band width', default=22, dest='dband') opt.add_option('--dmaxtime', action='store', type='float', help='maximum time, for drawing fair comparisons', default=0, dest='dmaxtime') # red #ff4d4d # green #4da74d # lila #a751ff color2code = { 'GREEN' : '#4da74d', 'YELLOW' : '#fefe44', 'PINK' : '#a751ff', 'RED' : '#cc1d1d', 'BLUE' : '#6687bb', 'CYAN' : '#34e2e2', } mp = {} info = [] # list of (text,color) def map_to_color(name): if name in mp: return mp[name] try: cls = Task.TaskBase.classes[name] except KeyError: return color2code['RED'] if cls.color in mp: return mp[cls.color] if cls.color in color2code: return color2code[cls.color] return color2code['RED'] def loop(self): while 1: tsk=Runner.TaskConsumer.ready.get() tsk.master.set_running(1, id(threading.currentThread()), tsk) Runner.process_task(tsk) tsk.master.set_running(-1, id(threading.currentThread()), tsk) Runner.TaskConsumer.loop = loop old_start = Runner.Parallel.start def do_start(self): print Options.options try: Options.options.dband except AttributeError: raise ValueError('use def options(opt): opt.load("parallel_debug")!') self.taskinfo = Queue() old_start(self) process_colors(self) 
Runner.Parallel.start = do_start def set_running(self, by, i, tsk): self.taskinfo.put( (i, id(tsk), time.time(), tsk.__class__.__name__, self.processed, self.count, by) ) Runner.Parallel.set_running = set_running def name2class(name): return name.replace(' ', '_').replace('.', '_') def process_colors(producer): # first, cast the parameters tmp = [] try: while True: tup = producer.taskinfo.get(False) tmp.append(list(tup)) except: pass try: ini = float(tmp[0][2]) except: return if not info: seen = [] for x in tmp: name = x[3] if not name in seen: seen.append(name) else: continue info.append((name, map_to_color(name))) info.sort(key=lambda x: x[0]) thread_count = 0 acc = [] for x in tmp: thread_count += x[6] acc.append("%d %d %f %r %d %d %d" % (x[0], x[1], x[2] - ini, x[3], x[4], x[5], thread_count)) f = open('pdebug.dat', 'w') #Utils.write('\n'.join(acc)) f.write('\n'.join(acc)) tmp = [lst[:2] + [float(lst[2]) - ini] + lst[3:] for lst in tmp] st = {} for l in tmp: if not l[0] in st: st[l[0]] = len(st.keys()) tmp = [ [st[lst[0]]] + lst[1:] for lst in tmp ] THREAD_AMOUNT = len(st.keys()) st = {} for l in tmp: if not l[1] in st: st[l[1]] = len(st.keys()) tmp = [ [lst[0]] + [st[lst[1]]] + lst[2:] for lst in tmp ] BAND = Options.options.dband seen = {} acc = [] for x in range(len(tmp)): line = tmp[x] id = line[1] if id in seen: continue seen[id] = True begin = line[2] thread_id = line[0] for y in range(x + 1, len(tmp)): line = tmp[y] if line[1] == id: end = line[2] #print id, thread_id, begin, end #acc.append( ( 10*thread_id, 10*(thread_id+1), 10*begin, 10*end ) ) acc.append( (BAND * begin, BAND*thread_id, BAND*end - BAND*begin, BAND, line[3]) ) break if Options.options.dmaxtime < 0.1: gwidth = 1 for x in tmp: m = BAND * x[2] if m > gwidth: gwidth = m else: gwidth = BAND * Options.options.dmaxtime ratio = float(Options.options.dwidth) / gwidth gwidth = Options.options.dwidth gheight = BAND * (THREAD_AMOUNT + len(info) + 1.5) out = [] out.append(""" \n """ % (0, 0, gwidth 
+ 4, gheight + 4, 0, 0, gwidth + 4, gheight + 4)) # main title if Options.options.dtitle: out.append("""%s """ % (gwidth/2, gheight - 5, Options.options.dtitle)) # the rectangles groups = {} for (x, y, w, h, clsname) in acc: try: groups[clsname].append((x, y, w, h)) except: groups[clsname] = [(x, y, w, h)] for cls in groups: out.append("\n" % name2class(cls)) for (x, y, w, h) in groups[cls]: out.append(""" \n""" % (2 + x*ratio, 2 + y, w*ratio, h, map_to_color(cls))) out.append("\n") # output the caption cnt = THREAD_AMOUNT for (text, color) in info: # caption box b = BAND/2 out.append("""\n""" % (name2class(text), 2 + BAND, 5 + (cnt + 0.5) * BAND, b, b, color)) # caption text out.append("""%s\n""" % (2 + 2 * BAND, 5 + (cnt + 0.5) * BAND + 10, text)) cnt += 1 out.append(""" """) out.append("\n") #node = producer.bld.path.make_node('pdebug.svg') f = open('pdebug.svg', 'w') f.write("".join(out)) ntdb-1.0/buildtools/wafadmin/3rdparty/batched_cc.py000066400000000000000000000110561224151530700224330ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006 (ita) """ Batched builds - compile faster instead of compiling object files one by one, c/c++ compilers are often able to compile at once: cc -c ../file1.c ../file2.c ../file3.c Files are output on the directory where the compiler is called, and dependencies are more difficult to track (do not run the command on all source files if only one file changes) As such, we do as if the files were compiled one by one, but no command is actually run: replace each cc/cpp Task by a TaskSlave A new task called TaskMaster collects the signatures from each slave and finds out the command-line to run. 
To set this up, the method ccroot::create_task is replaced by a new version, to enable batched builds it is only necessary to import this module in the configuration (no other change required) """ MAX_BATCH = 50 MAXPARALLEL = False EXT_C = ['.c', '.cc', '.cpp', '.cxx'] import os, threading import TaskGen, Task, ccroot, Build, Logs from TaskGen import extension, feature, before from Constants import * cc_str = '${CC} ${CCFLAGS} ${CPPFLAGS} ${_CCINCFLAGS} ${_CCDEFFLAGS} -c ${SRCLST}' cc_fun = Task.compile_fun_noshell('batched_cc', cc_str)[0] cxx_str = '${CXX} ${CXXFLAGS} ${CPPFLAGS} ${_CXXINCFLAGS} ${_CXXDEFFLAGS} -c ${SRCLST}' cxx_fun = Task.compile_fun_noshell('batched_cxx', cxx_str)[0] count = 70000 class batch_task(Task.Task): color = 'RED' after = 'cc cxx' before = 'cc_link cxx_link static_link' def __str__(self): return '(batch compilation for %d slaves)\n' % len(self.slaves) def __init__(self, *k, **kw): Task.Task.__init__(self, *k, **kw) self.slaves = [] self.inputs = [] self.hasrun = 0 global count count += 1 self.idx = count def add_slave(self, slave): self.slaves.append(slave) self.set_run_after(slave) def runnable_status(self): for t in self.run_after: if not t.hasrun: return ASK_LATER for t in self.slaves: #if t.executed: if t.hasrun != SKIPPED: return RUN_ME return SKIP_ME def run(self): outputs = [] self.outputs = [] srclst = [] slaves = [] for t in self.slaves: if t.hasrun != SKIPPED: slaves.append(t) srclst.append(t.inputs[0].abspath(self.env)) self.env.SRCLST = srclst self.cwd = slaves[0].inputs[0].parent.abspath(self.env) env = self.env app = env.append_unique cpppath_st = env['CPPPATH_ST'] env._CCINCFLAGS = env.CXXINCFLAGS = [] # local flags come first # set the user-defined includes paths for i in env['INC_PATHS']: app('_CCINCFLAGS', cpppath_st % i.abspath()) app('_CXXINCFLAGS', cpppath_st % i.abspath()) app('_CCINCFLAGS', cpppath_st % i.abspath(env)) app('_CXXINCFLAGS', cpppath_st % i.abspath(env)) # set the library include paths for i in 
env['CPPPATH']: app('_CCINCFLAGS', cpppath_st % i) app('_CXXINCFLAGS', cpppath_st % i) if self.slaves[0].__class__.__name__ == 'cc': ret = cc_fun(self) else: ret = cxx_fun(self) if ret: return ret for t in slaves: t.old_post_run() from TaskGen import extension, feature, after import cc, cxx def wrap(fun): def foo(self, node): # we cannot control the extension, this sucks self.obj_ext = '.o' task = fun(self, node) if not getattr(self, 'masters', None): self.masters = {} self.allmasters = [] if not node.parent.id in self.masters: m = self.masters[node.parent.id] = self.master = self.create_task('batch') self.allmasters.append(m) else: m = self.masters[node.parent.id] if len(m.slaves) > MAX_BATCH: m = self.masters[node.parent.id] = self.master = self.create_task('batch') self.allmasters.append(m) m.add_slave(task) return task return foo c_hook = wrap(cc.c_hook) extension(cc.EXT_CC)(c_hook) cxx_hook = wrap(cxx.cxx_hook) extension(cxx.EXT_CXX)(cxx_hook) @feature('cprogram', 'cshlib', 'cstaticlib') @after('apply_link') def link_after_masters(self): if getattr(self, 'allmasters', None): for m in self.allmasters: self.link_task.set_run_after(m) for c in ['cc', 'cxx']: t = Task.TaskBase.classes[c] def run(self): pass def post_run(self): #self.executed=1 pass def can_retrieve_cache(self): if self.old_can_retrieve_cache(): for m in self.generator.allmasters: try: m.slaves.remove(self) except ValueError: pass #this task wasn't included in that master return 1 else: return None setattr(t, 'oldrun', t.__dict__['run']) setattr(t, 'run', run) setattr(t, 'old_post_run', t.post_run) setattr(t, 'post_run', post_run) setattr(t, 'old_can_retrieve_cache', t.can_retrieve_cache) setattr(t, 'can_retrieve_cache', can_retrieve_cache) ntdb-1.0/buildtools/wafadmin/3rdparty/boost.py000066400000000000000000000252421224151530700215240ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # # partially based on boost.py written by Gernot Vormayr # written by Ruediger Sonderfeld , 
2008 # modified by Bjoern Michaelsen, 2008 # modified by Luca Fossati, 2008 # rewritten for waf 1.5.1, Thomas Nagy, 2008 # #def set_options(opt): # opt.tool_options('boost') # # ... # #def configure(conf): # # ... (e.g. conf.check_tool('g++')) # conf.check_tool('boost') # conf.check_boost(lib='signals filesystem', static='onlystatic', score_version=(-1000, 1000), tag_minscore=1000) # #def build(bld): # bld(source='main.c', target='bar', uselib="BOOST BOOST_SYSTEM") # #ISSUES: # * find_includes should be called only once! # * support mandatory ######## boost update ########### ## ITA: * the method get_boost_version_number does work ## * the rest of the code has not really been tried # * make certain a demo is provided (in demos/adv for example) # TODO: bad and underdocumented code -> boost.py will be removed in waf 1.6 to be rewritten later import os.path, glob, types, re, sys import Configure, config_c, Options, Utils, Logs from Logs import warn, debug from Configure import conf boost_code = ''' #include #include int main() { std::cout << BOOST_VERSION << std::endl; } ''' boost_libpath = ['/usr/lib', '/usr/local/lib', '/opt/local/lib', '/sw/lib', '/lib'] boost_cpppath = ['/usr/include', '/usr/local/include', '/opt/local/include', '/sw/include'] STATIC_NOSTATIC = 'nostatic' STATIC_BOTH = 'both' STATIC_ONLYSTATIC = 'onlystatic' is_versiontag = re.compile('^\d+_\d+_?\d*$') is_threadingtag = re.compile('^mt$') is_abitag = re.compile('^[sgydpn]+$') is_toolsettag = re.compile('^(acc|borland|como|cw|dmc|darwin|gcc|hp_cxx|intel|kylix|vc|mgw|qcc|sun|vacpp)\d*$') is_pythontag=re.compile('^py[0-9]{2}$') def set_options(opt): opt.add_option('--boost-includes', type='string', default='', dest='boostincludes', help='path to the boost directory where the includes are e.g. /usr/local/include/boost-1_35') opt.add_option('--boost-libs', type='string', default='', dest='boostlibs', help='path to the directory where the boost libs are e.g. 
/usr/local/lib') def string_to_version(s): version = s.split('.') if len(version) < 3: return 0 return int(version[0])*100000 + int(version[1])*100 + int(version[2]) def version_string(version): major = version / 100000 minor = version / 100 % 1000 minor_minor = version % 100 if minor_minor == 0: return "%d_%d" % (major, minor) else: return "%d_%d_%d" % (major, minor, minor_minor) def libfiles(lib, pattern, lib_paths): result = [] for lib_path in lib_paths: libname = pattern % ('boost_%s[!_]*' % lib) result += glob.glob(os.path.join(lib_path, libname)) return result @conf def get_boost_version_number(self, dir): """silently retrieve the boost version number""" try: return self.run_c_code(compiler='cxx', code=boost_code, includes=dir, execute=1, env=self.env.copy(), type='cprogram', compile_mode='cxx', compile_filename='test.cpp') except Configure.ConfigurationError, e: return -1 def set_default(kw, var, val): if not var in kw: kw[var] = val def tags_score(tags, kw): """ checks library tags see http://www.boost.org/doc/libs/1_35_0/more/getting_started/unix-variants.html 6.1 """ score = 0 needed_tags = { 'threading': kw['tag_threading'], 'abi': kw['tag_abi'], 'toolset': kw['tag_toolset'], 'version': kw['tag_version'], 'python': kw['tag_python'] } if kw['tag_toolset'] is None: v = kw['env'] toolset = v['CXX_NAME'] if v['CXX_VERSION']: version_no = v['CXX_VERSION'].split('.') toolset += version_no[0] if len(version_no) > 1: toolset += version_no[1] needed_tags['toolset'] = toolset found_tags = {} for tag in tags: if is_versiontag.match(tag): found_tags['version'] = tag if is_threadingtag.match(tag): found_tags['threading'] = tag if is_abitag.match(tag): found_tags['abi'] = tag if is_toolsettag.match(tag): found_tags['toolset'] = tag if is_pythontag.match(tag): found_tags['python'] = tag for tagname in needed_tags.iterkeys(): if needed_tags[tagname] is not None and tagname in found_tags: if re.compile(needed_tags[tagname]).match(found_tags[tagname]): score += 
kw['score_' + tagname][0] else: score += kw['score_' + tagname][1] return score @conf def validate_boost(self, kw): ver = kw.get('version', '') for x in 'min_version max_version version'.split(): set_default(kw, x, ver) set_default(kw, 'lib', '') kw['lib'] = Utils.to_list(kw['lib']) set_default(kw, 'env', self.env) set_default(kw, 'libpath', boost_libpath) set_default(kw, 'cpppath', boost_cpppath) for x in 'tag_threading tag_version tag_toolset'.split(): set_default(kw, x, None) set_default(kw, 'tag_abi', '^[^d]*$') set_default(kw, 'python', str(sys.version_info[0]) + str(sys.version_info[1]) ) set_default(kw, 'tag_python', '^py' + kw['python'] + '$') set_default(kw, 'score_threading', (10, -10)) set_default(kw, 'score_abi', (10, -10)) set_default(kw, 'score_python', (10,-10)) set_default(kw, 'score_toolset', (1, -1)) set_default(kw, 'score_version', (100, -100)) set_default(kw, 'score_min', 0) set_default(kw, 'static', STATIC_NOSTATIC) set_default(kw, 'found_includes', False) set_default(kw, 'min_score', 0) set_default(kw, 'errmsg', 'not found') set_default(kw, 'okmsg', 'ok') @conf def find_boost_includes(self, kw): """ check every path in kw['cpppath'] for subdir that either starts with boost- or is named boost. Then the version is checked and selected accordingly to min_version/max_version. The highest possible version number is selected! If no versiontag is set the versiontag is set accordingly to the selected library and CPPPATH_BOOST is set. 
""" boostPath = getattr(Options.options, 'boostincludes', '') if boostPath: boostPath = [os.path.normpath(os.path.expandvars(os.path.expanduser(boostPath)))] else: boostPath = Utils.to_list(kw['cpppath']) min_version = string_to_version(kw.get('min_version', '')) max_version = string_to_version(kw.get('max_version', '')) or (sys.maxint - 1) version = 0 for include_path in boostPath: boost_paths = [p for p in glob.glob(os.path.join(include_path, 'boost*')) if os.path.isdir(p)] debug('BOOST Paths: %r' % boost_paths) for path in boost_paths: pathname = os.path.split(path)[-1] ret = -1 if pathname == 'boost': path = include_path ret = self.get_boost_version_number(path) elif pathname.startswith('boost-'): ret = self.get_boost_version_number(path) ret = int(ret) if ret != -1 and ret >= min_version and ret <= max_version and ret > version: boost_path = path version = ret if not version: self.fatal('boost headers not found! (required version min: %s max: %s)' % (kw['min_version'], kw['max_version'])) return False found_version = version_string(version) versiontag = '^' + found_version + '$' if kw['tag_version'] is None: kw['tag_version'] = versiontag elif kw['tag_version'] != versiontag: warn('boost header version %r and tag_version %r do not match!' 
% (versiontag, kw['tag_version'])) env = self.env env['CPPPATH_BOOST'] = boost_path env['BOOST_VERSION'] = found_version self.found_includes = 1 ret = 'Version %s (%s)' % (found_version, boost_path) return ret @conf def find_boost_library(self, lib, kw): def find_library_from_list(lib, files): lib_pattern = re.compile('.*boost_(.*?)\..*') result = (None, None) resultscore = kw['min_score'] - 1 for file in files: m = lib_pattern.search(file, 1) if m: libname = m.group(1) libtags = libname.split('-')[1:] currentscore = tags_score(libtags, kw) if currentscore > resultscore: result = (libname, file) resultscore = currentscore return result lib_paths = getattr(Options.options, 'boostlibs', '') if lib_paths: lib_paths = [os.path.normpath(os.path.expandvars(os.path.expanduser(lib_paths)))] else: lib_paths = Utils.to_list(kw['libpath']) v = kw.get('env', self.env) (libname, file) = (None, None) if kw['static'] in [STATIC_NOSTATIC, STATIC_BOTH]: st_env_prefix = 'LIB' files = libfiles(lib, v['shlib_PATTERN'], lib_paths) (libname, file) = find_library_from_list(lib, files) if libname is None and kw['static'] in [STATIC_ONLYSTATIC, STATIC_BOTH]: st_env_prefix = 'STATICLIB' staticLibPattern = v['staticlib_PATTERN'] if self.env['CC_NAME'] == 'msvc': staticLibPattern = 'lib' + staticLibPattern files = libfiles(lib, staticLibPattern, lib_paths) (libname, file) = find_library_from_list(lib, files) if libname is not None: v['LIBPATH_BOOST_' + lib.upper()] = [os.path.split(file)[0]] if self.env['CC_NAME'] == 'msvc' and os.path.splitext(file)[1] == '.lib': v[st_env_prefix + '_BOOST_' + lib.upper()] = ['libboost_'+libname] else: v[st_env_prefix + '_BOOST_' + lib.upper()] = ['boost_'+libname] return self.fatal('lib boost_' + lib + ' not found!') @conf def check_boost(self, *k, **kw): """ This should be the main entry point - min_version - max_version - version - include_path - lib_path - lib - toolsettag - None or a regexp - threadingtag - None or a regexp - abitag - None or a regexp - 
versiontag - WARNING: you should rather use version or min_version/max_version - static - look for static libs (values: 'nostatic' or STATIC_NOSTATIC - ignore static libs (default) 'both' or STATIC_BOTH - find static libs, too 'onlystatic' or STATIC_ONLYSTATIC - find only static libs - score_version - score_abi - scores_threading - score_toolset * the scores are tuples (match_score, nomatch_score) match_score is the added to the score if the tag is matched nomatch_score is added when a tag is found and does not match - min_score """ if not self.env['CXX']: self.fatal('load a c++ compiler tool first, for example conf.check_tool("g++")') self.validate_boost(kw) ret = None try: if not kw.get('found_includes', None): self.check_message_1(kw.get('msg_includes', 'boost headers')) ret = self.find_boost_includes(kw) except Configure.ConfigurationError, e: if 'errmsg' in kw: self.check_message_2(kw['errmsg'], 'YELLOW') if 'mandatory' in kw: if Logs.verbose > 1: raise else: self.fatal('the configuration failed (see %r)' % self.log.name) else: if 'okmsg' in kw: self.check_message_2(kw.get('okmsg_includes', ret)) for lib in kw['lib']: self.check_message_1('library boost_'+lib) try: self.find_boost_library(lib, kw) except Configure.ConfigurationError, e: ret = False if 'errmsg' in kw: self.check_message_2(kw['errmsg'], 'YELLOW') if 'mandatory' in kw: if Logs.verbose > 1: raise else: self.fatal('the configuration failed (see %r)' % self.log.name) else: if 'okmsg' in kw: self.check_message_2(kw['okmsg']) return ret ntdb-1.0/buildtools/wafadmin/3rdparty/fluid.py000066400000000000000000000015431224151530700214770ustar00rootroot00000000000000#!/usr/bin/python # encoding: utf-8 # Grygoriy Fuchedzhy 2009 """ Compile fluid files (fltk graphic library). Use the 'fluid' feature in conjuction with the 'cxx' feature. 
""" import Task from TaskGen import extension Task.simple_task_type('fluid', '${FLUID} -c -o ${TGT[0].abspath(env)} -h ${TGT[1].abspath(env)} ${SRC}', 'BLUE', shell=False, ext_out='.cxx') @extension('.fl') def fluid(self, node): """add the .fl to the source list; the cxx file generated will be compiled when possible""" cpp = node.change_ext('.cpp') hpp = node.change_ext('.hpp') self.create_task('fluid', node, [cpp, hpp]) if 'cxx' in self.features: self.allnodes.append(cpp) def detect(conf): fluid = conf.find_program('fluid', var='FLUID', mandatory=True) conf.check_cfg(path='fltk-config', package='', args='--cxxflags --ldflags', uselib_store='FLTK', mandatory=True) ntdb-1.0/buildtools/wafadmin/3rdparty/gccdeps.py000066400000000000000000000054231224151530700220050ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2008-2010 (ita) """ Execute the tasks with gcc -MD, read the dependencies from the .d file and prepare the dependency calculation for the next run """ import os, re, threading import Task, Logs, Utils, preproc from TaskGen import before, after, feature lock = threading.Lock() preprocessor_flag = '-MD' @feature('cc') @before('apply_core') def add_mmd_cc(self): if self.env.get_flat('CCFLAGS').find(preprocessor_flag) < 0: self.env.append_value('CCFLAGS', preprocessor_flag) @feature('cxx') @before('apply_core') def add_mmd_cxx(self): if self.env.get_flat('CXXFLAGS').find(preprocessor_flag) < 0: self.env.append_value('CXXFLAGS', preprocessor_flag) def scan(self): "the scanner does not do anything initially" nodes = self.generator.bld.node_deps.get(self.unique_id(), []) names = [] return (nodes, names) re_o = re.compile("\.o$") re_src = re.compile("^(\.\.)[\\/](.*)$") def post_run(self): # The following code is executed by threads, it is not safe, so a lock is needed... 
if getattr(self, 'cached', None): return Task.Task.post_run(self) name = self.outputs[0].abspath(self.env) name = re_o.sub('.d', name) txt = Utils.readf(name) #os.unlink(name) txt = txt.replace('\\\n', '') lst = txt.strip().split(':') val = ":".join(lst[1:]) val = val.split() nodes = [] bld = self.generator.bld f = re.compile("^("+self.env.variant()+"|\.\.)[\\/](.*)$") for x in val: if os.path.isabs(x): if not preproc.go_absolute: continue lock.acquire() try: node = bld.root.find_resource(x) finally: lock.release() else: g = re.search(re_src, x) if g: x = g.group(2) lock.acquire() try: node = bld.bldnode.parent.find_resource(x) finally: lock.release() else: g = re.search(f, x) if g: x = g.group(2) lock.acquire() try: node = bld.srcnode.find_resource(x) finally: lock.release() if id(node) == id(self.inputs[0]): # ignore the source file, it is already in the dependencies # this way, successful config tests may be retrieved from the cache continue if not node: raise ValueError('could not find %r for %r' % (x, self)) else: nodes.append(node) Logs.debug('deps: real scanner for %s returned %s' % (str(self), str(nodes))) bld.node_deps[self.unique_id()] = nodes bld.raw_deps[self.unique_id()] = [] try: del self.cache_sig except: pass Task.Task.post_run(self) import Constants, Utils def sig_implicit_deps(self): try: return Task.Task.sig_implicit_deps(self) except Utils.WafError: return Constants.SIG_NIL for name in 'cc cxx'.split(): try: cls = Task.TaskBase.classes[name] except KeyError: pass else: cls.post_run = post_run cls.scan = scan cls.sig_implicit_deps = sig_implicit_deps ntdb-1.0/buildtools/wafadmin/3rdparty/go.py000066400000000000000000000066211224151530700210030ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # go.py - Waf tool for the Go programming language # By: Tom Wambold import platform, os import Task import Utils from TaskGen import feature, extension, after Task.simple_task_type('gocompile', '${GOC} ${GOCFLAGS} -o ${TGT} ${SRC}', 
shell=False) Task.simple_task_type('gopack', '${GOP} grc ${TGT} ${SRC}', shell=False) Task.simple_task_type('golink', '${GOL} ${GOLFLAGS} -o ${TGT} ${SRC}', shell=False) def detect(conf): def set_def(var, val): if not conf.env[var]: conf.env[var] = val goarch = os.getenv("GOARCH") if goarch == '386': set_def('GO_PLATFORM', 'i386') elif goarch == 'amd64': set_def('GO_PLATFORM', 'x86_64') elif goarch == 'arm': set_def('GO_PLATFORM', 'arm') else: set_def('GO_PLATFORM', platform.machine()) if conf.env.GO_PLATFORM == 'x86_64': set_def('GO_COMPILER', '6g') set_def('GO_LINKER', '6l') set_def('GO_EXTENSION', '.6') elif conf.env.GO_PLATFORM in ['i386', 'i486', 'i586', 'i686']: set_def('GO_COMPILER', '8g') set_def('GO_LINKER', '8l') set_def('GO_EXTENSION', '.8') elif conf.env.GO_PLATFORM == 'arm': set_def('GO_COMPILER', '5g') set_def('GO_LINKER', '5l') set_def('GO_EXTENSION', '.5') if not (conf.env.GO_COMPILER or conf.env.GO_LINKER or conf.env.GO_EXTENSION): raise conf.fatal('Unsupported platform ' + platform.machine()) set_def('GO_PACK', 'gopack') set_def('GO_PACK_EXTENSION', '.a') conf.find_program(conf.env.GO_COMPILER, var='GOC', mandatory=True) conf.find_program(conf.env.GO_LINKER, var='GOL', mandatory=True) conf.find_program(conf.env.GO_PACK, var='GOP', mandatory=True) conf.find_program('cgo', var='CGO', mandatory=True) @extension('.go') def compile_go(self, node): try: self.go_nodes.append(node) except AttributeError: self.go_nodes = [node] @feature('go') @after('apply_core') def apply_compile_go(self): try: nodes = self.go_nodes except AttributeError: self.go_compile_task = None else: self.go_compile_task = self.create_task('gocompile', nodes, [self.path.find_or_declare(self.target + self.env.GO_EXTENSION)]) @feature('gopackage', 'goprogram') @after('apply_compile_go') def apply_goinc(self): if not getattr(self, 'go_compile_task', None): return names = self.to_list(getattr(self, 'uselib_local', [])) for name in names: obj = self.name_to_obj(name) if not obj: raise 
Utils.WafError('object %r was not found in uselib_local ' '(required by %r)' % (lib_name, self.name)) obj.post() self.go_compile_task.set_run_after(obj.go_package_task) self.go_compile_task.dep_nodes.extend(obj.go_package_task.outputs) self.env.append_unique('GOCFLAGS', '-I' + obj.path.abspath(obj.env)) self.env.append_unique('GOLFLAGS', '-L' + obj.path.abspath(obj.env)) @feature('gopackage') @after('apply_goinc') def apply_gopackage(self): self.go_package_task = self.create_task('gopack', self.go_compile_task.outputs[0], self.path.find_or_declare(self.target + self.env.GO_PACK_EXTENSION)) self.go_package_task.set_run_after(self.go_compile_task) self.go_package_task.dep_nodes.extend(self.go_compile_task.outputs) @feature('goprogram') @after('apply_goinc') def apply_golink(self): self.go_link_task = self.create_task('golink', self.go_compile_task.outputs[0], self.path.find_or_declare(self.target)) self.go_link_task.set_run_after(self.go_compile_task) self.go_link_task.dep_nodes.extend(self.go_compile_task.outputs) ntdb-1.0/buildtools/wafadmin/3rdparty/lru_cache.py000066400000000000000000000045641224151530700223270ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy 2011 import os, shutil, re import Options, Build, Logs """ Apply a least recently used policy to the Waf cache. For performance reasons, it is called after the build is complete. 
We assume that the the folders are written atomically Do export WAFCACHE=/tmp/foo-xyz where xyz represents the cache size in megabytes If missing, the default cache size will be set to 10GB """ re_num = re.compile('[a-zA-Z_]+(\d+)') CACHESIZE = 10*1024*1024*1024 # in bytes CLEANRATIO = 0.8 DIRSIZE = 4096 def compile(self): if Options.cache_global and not Options.options.nocache: try: os.makedirs(Options.cache_global) except: pass try: self.raw_compile() finally: if Options.cache_global and not Options.options.nocache: self.sweep() def sweep(self): global CACHESIZE CACHEDIR = Options.cache_global # get the cache max size from the WAFCACHE filename re_num = re.compile('[a-zA-Z_]+(\d+)') val = re_num.sub('\\1', os.path.basename(Options.cache_global)) try: CACHESIZE = int(val) except: pass # map folder names to timestamps flist = {} for x in os.listdir(CACHEDIR): j = os.path.join(CACHEDIR, x) if os.path.isdir(j) and len(x) == 32: # dir names are md5 hexdigests flist[x] = [os.stat(j).st_mtime, 0] for (x, v) in flist.items(): cnt = DIRSIZE # each entry takes 4kB d = os.path.join(CACHEDIR, x) for k in os.listdir(d): cnt += os.stat(os.path.join(d, k)).st_size flist[x][1] = cnt total = sum([x[1] for x in flist.values()]) Logs.debug('lru: Cache size is %r' % total) if total >= CACHESIZE: Logs.debug('lru: Trimming the cache since %r > %r' % (total, CACHESIZE)) # make a list to sort the folders by timestamp lst = [(p, v[0], v[1]) for (p, v) in flist.items()] lst.sort(key=lambda x: x[1]) # sort by timestamp lst.reverse() while total >= CACHESIZE * CLEANRATIO: (k, t, s) = lst.pop() p = os.path.join(CACHEDIR, k) v = p + '.del' try: os.rename(p, v) except: # someone already did it pass else: try: shutil.rmtree(v) except: # this should not happen, but who knows? 
Logs.warn('If you ever see this message, report it (%r)' % v) total -= s del flist[k] Logs.debug('lru: Total at the end %r' % total) Build.BuildContext.raw_compile = Build.BuildContext.compile Build.BuildContext.compile = compile Build.BuildContext.sweep = sweep ntdb-1.0/buildtools/wafadmin/3rdparty/paranoid.py000066400000000000000000000015761224151530700221770ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # ita 2010 import Logs, Utils, Build, Task def say(txt): Logs.warn("^o^: %s" % txt) try: ret = Utils.cmd_output('which cowsay 2> /dev/null').strip() except Exception, e: pass else: def say(txt): f = Utils.cmd_output([ret, txt]) Utils.pprint('PINK', f) say('you make the errors, we detect them') def check_task_classes(self): for x in Task.TaskBase.classes: if isinstance(x, Task.Task): if not getattr(cls, 'ext_in', None) or getattr(cls, 'before', None): say('class %s has no precedence constraints (ext_in/before)') if not getattr(cls, 'ext_out', None) or getattr(cls, 'after', None): say('class %s has no precedence constraints (ext_out/after)') comp = Build.BuildContext.compile def compile(self): if not getattr(self, 'magic', None): check_task_classes(self) return comp(self) Build.BuildContext.compile = compile ntdb-1.0/buildtools/wafadmin/3rdparty/swig.py000066400000000000000000000116701224151530700213470ustar00rootroot00000000000000#! 
/usr/bin/env python # encoding: UTF-8 # Petar Forai # Thomas Nagy 2008 import re import Task, Utils, Logs from TaskGen import extension from Configure import conf import preproc """ Welcome in the hell of adding tasks dynamically swig interface files may be created at runtime, the module name may be unknown in advance rev 5859 is much more simple """ SWIG_EXTS = ['.swig', '.i'] swig_str = '${SWIG} ${SWIGFLAGS} ${_CCINCFLAGS} ${_CXXINCFLAGS} ${_CCDEFFLAGS} ${_CXXDEFFLAGS} ${SRC}' cls = Task.simple_task_type('swig', swig_str, color='BLUE', ext_in='.i .h', ext_out='.o .c .cxx', shell=False) def runnable_status(self): for t in self.run_after: if not t.hasrun: return ASK_LATER if not getattr(self, 'init_outputs', None): self.init_outputs = True if not getattr(self, 'module', None): # search the module name txt = self.inputs[0].read(self.env) m = re_module.search(txt) if not m: raise ValueError("could not find the swig module name") self.module = m.group(1) swig_c(self) # add the language-specific output files as nodes # call funs in the dict swig_langs for x in self.env['SWIGFLAGS']: # obtain the language x = x[1:] try: fun = swig_langs[x] except KeyError: pass else: fun(self) return Task.Task.runnable_status(self) setattr(cls, 'runnable_status', runnable_status) re_module = re.compile('%module(?:\s*\(.*\))?\s+(.+)', re.M) re_1 = re.compile(r'^%module.*?\s+([\w]+)\s*?$', re.M) re_2 = re.compile('%include "(.*)"', re.M) re_3 = re.compile('#include "(.*)"', re.M) def scan(self): "scan for swig dependencies, climb the .i files" env = self.env lst_src = [] seen = [] to_see = [self.inputs[0]] while to_see: node = to_see.pop(0) if node.id in seen: continue seen.append(node.id) lst_src.append(node) # read the file code = node.read(env) code = preproc.re_nl.sub('', code) code = preproc.re_cpp.sub(preproc.repl, code) # find .i files and project headers names = re_2.findall(code) + re_3.findall(code) for n in names: for d in self.generator.env.INC_PATHS + [node.parent]: u = 
d.find_resource(n) if u: to_see.append(u) break else: Logs.warn('could not find %r' % n) # list of nodes this one depends on, and module name if present if Logs.verbose: Logs.debug('deps: deps for %s: %s' % (str(self), str(lst_src))) return (lst_src, []) cls.scan = scan # provide additional language processing swig_langs = {} def swig(fun): swig_langs[fun.__name__.replace('swig_', '')] = fun def swig_c(self): ext = '.swigwrap_%d.c' % self.generator.idx flags = self.env['SWIGFLAGS'] if '-c++' in flags: ext += 'xx' out_node = self.inputs[0].parent.find_or_declare(self.module + ext) try: if '-c++' in flags: fun = self.generator.cxx_hook else: fun = self.generator.c_hook except AttributeError: raise Utils.WafError('No c%s compiler was found to process swig files' % ('-c++' in flags and '++' or '')) task = fun(out_node) task.set_run_after(self) ge = self.generator.bld.generator ge.outstanding.insert(0, task) ge.total += 1 try: ltask = self.generator.link_task except AttributeError: pass else: ltask.inputs.append(task.outputs[0]) self.outputs.append(out_node) if not '-o' in self.env['SWIGFLAGS']: self.env.append_value('SWIGFLAGS', '-o') self.env.append_value('SWIGFLAGS', self.outputs[0].abspath(self.env)) @swig def swig_python(tsk): tsk.set_outputs(tsk.inputs[0].parent.find_or_declare(tsk.module + '.py')) @swig def swig_ocaml(tsk): tsk.set_outputs(tsk.inputs[0].parent.find_or_declare(tsk.module + '.ml')) tsk.set_outputs(tsk.inputs[0].parent.find_or_declare(tsk.module + '.mli')) @extension(SWIG_EXTS) def i_file(self, node): # the task instance tsk = self.create_task('swig') tsk.set_inputs(node) tsk.module = getattr(self, 'swig_module', None) flags = self.to_list(getattr(self, 'swig_flags', [])) self.env.append_value('SWIGFLAGS', flags) if not '-outdir' in flags: flags.append('-outdir') flags.append(node.parent.abspath(self.env)) @conf def check_swig_version(conf, minver=None): """Check for a minimum swig version like conf.check_swig_version('1.3.28') or 
conf.check_swig_version((1,3,28)) """ reg_swig = re.compile(r'SWIG Version\s(.*)', re.M) swig_out = Utils.cmd_output('%s -version' % conf.env['SWIG']) swigver = [int(s) for s in reg_swig.findall(swig_out)[0].split('.')] if isinstance(minver, basestring): minver = [int(s) for s in minver.split(".")] if isinstance(minver, tuple): minver = [int(s) for s in minver] result = (minver is None) or (minver[:3] <= swigver[:3]) swigver_full = '.'.join(map(str, swigver)) if result: conf.env['SWIG_VERSION'] = swigver_full minver_str = '.'.join(map(str, minver)) if minver is None: conf.check_message_custom('swig version', '', swigver_full) else: conf.check_message('swig version', '>= %s' % (minver_str,), result, option=swigver_full) return result def detect(conf): swig = conf.find_program('swig', var='SWIG', mandatory=True) ntdb-1.0/buildtools/wafadmin/3rdparty/valadoc.py000066400000000000000000000071661224151530700220140ustar00rootroot00000000000000#! /usr/bin/env python # encoding: UTF-8 # Nicolas Joseph 2009 from fnmatch import fnmatchcase import os, os.path, re, stat import Task, Utils, Node, Constants from TaskGen import feature, extension, after from Logs import debug, warn, error VALADOC_STR = '${VALADOC}' class valadoc_task(Task.Task): vars = ['VALADOC', 'VALADOCFLAGS'] color = 'BLUE' after = 'cxx_link cc_link' quiet = True output_dir = '' doclet = '' package_name = '' package_version = '' files = [] protected = True private = False inherit = False deps = False enable_non_null_experimental = False force = False def runnable_status(self): return True def run(self): if self.env['VALADOC']: if not self.env['VALADOCFLAGS']: self.env['VALADOCFLAGS'] = '' cmd = [Utils.subst_vars(VALADOC_STR, self.env)] cmd.append ('-o %s' % self.output_dir) if getattr(self, 'doclet', None): cmd.append ('--doclet %s' % self.doclet) cmd.append ('--package-name %s' % self.package_name) if getattr(self, 'version', None): cmd.append ('--package-version %s' % self.package_version) if getattr(self, 
'packages', None): for package in self.packages: cmd.append ('--pkg %s' % package) if getattr(self, 'vapi_dirs', None): for vapi_dir in self.vapi_dirs: cmd.append ('--vapidir %s' % vapi_dir) if not getattr(self, 'protected', None): cmd.append ('--no-protected') if getattr(self, 'private', None): cmd.append ('--private') if getattr(self, 'inherit', None): cmd.append ('--inherit') if getattr(self, 'deps', None): cmd.append ('--deps') if getattr(self, 'enable_non_null_experimental', None): cmd.append ('--enable-non-null-experimental') if getattr(self, 'force', None): cmd.append ('--force') cmd.append (' '.join ([x.relpath_gen (self.generator.bld.bldnode) for x in self.files])) return self.generator.bld.exec_command(' '.join(cmd)) else: error ('You must install valadoc for generate the API documentation') return -1 @feature('valadoc') def process_valadoc(self): task = getattr(self, 'task', None) if not task: task = self.create_task('valadoc') self.task = task if getattr(self, 'output_dir', None): task.output_dir = self.output_dir else: Utils.WafError('no output directory') if getattr(self, 'doclet', None): task.doclet = self.doclet else: Utils.WafError('no doclet directory') if getattr(self, 'package_name', None): task.package_name = self.package_name else: Utils.WafError('no package name') if getattr(self, 'package_version', None): task.package_version = self.package_version if getattr(self, 'packages', None): task.packages = Utils.to_list(self.packages) if getattr(self, 'vapi_dirs', None): task.vapi_dirs = Utils.to_list(self.vapi_dirs) if getattr(self, 'files', None): task.files = self.files else: Utils.WafError('no input file') if getattr(self, 'protected', None): task.protected = self.protected if getattr(self, 'private', None): task.private = self.private if getattr(self, 'inherit', None): task.inherit = self.inherit if getattr(self, 'deps', None): task.deps = self.deps if getattr(self, 'enable_non_null_experimental', None): task.enable_non_null_experimental = 
self.enable_non_null_experimental if getattr(self, 'force', None): task.force = self.force def detect(conf): conf.find_program('valadoc', var='VALADOC', mandatory=False) ntdb-1.0/buildtools/wafadmin/Build.py000066400000000000000000000673621224151530700176760ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005 (ita) """ Dependency tree holder The class Build holds all the info related to a build: * file system representation (tree of Node instances) * various cached objects (task signatures, file scan results, ..) There is only one Build object at a time (bld singleton) """ import os, sys, errno, re, glob, gc, datetime, shutil try: import cPickle except: import pickle as cPickle import Runner, TaskGen, Node, Scripting, Utils, Environment, Task, Logs, Options from Logs import debug, error, info from Constants import * SAVED_ATTRS = 'root srcnode bldnode node_sigs node_deps raw_deps task_sigs id_nodes'.split() "Build class members to save" bld = None "singleton - safe to use when Waf is not used as a library" class BuildError(Utils.WafError): def __init__(self, b=None, t=[]): self.bld = b self.tasks = t self.ret = 1 Utils.WafError.__init__(self, self.format_error()) def format_error(self): lst = ['Build failed:'] for tsk in self.tasks: txt = tsk.format_error() if txt: lst.append(txt) sep = ' ' if len(lst) > 2: sep = '\n' return sep.join(lst) def group_method(fun): """ sets a build context method to execute after the current group has finished executing this is useful for installing build files: * calling install_files/install_as will fail if called too early * people do not want to define install method in their task classes TODO: try it """ def f(*k, **kw): if not k[0].is_install: return False postpone = True if 'postpone' in kw: postpone = kw['postpone'] del kw['postpone'] # TODO waf 1.6 in theory there should be no reference to the TaskManager internals here if postpone: m = k[0].task_manager if not m.groups: m.add_group() 
m.groups[m.current_group].post_funs.append((fun, k, kw)) if not 'cwd' in kw: kw['cwd'] = k[0].path else: fun(*k, **kw) return f class BuildContext(Utils.Context): "holds the dependency tree" def __init__(self): # not a singleton, but provided for compatibility global bld bld = self self.task_manager = Task.TaskManager() # instead of hashing the nodes, we assign them a unique id when they are created self.id_nodes = 0 self.idx = {} # map names to environments, the 'default' must be defined self.all_envs = {} # ======================================= # # code for reading the scripts # project build directory - do not reset() from load_dirs() self.bdir = '' # the current directory from which the code is run # the folder changes everytime a wscript is read self.path = None # Manual dependencies. self.deps_man = Utils.DefaultDict(list) # ======================================= # # cache variables # local cache for absolute paths - cache_node_abspath[variant][node] self.cache_node_abspath = {} # list of folders that are already scanned # so that we do not need to stat them one more time self.cache_scanned_folders = {} # list of targets to uninstall for removing the empty folders after uninstalling self.uninstall = [] # ======================================= # # tasks and objects # build dir variants (release, debug, ..) 
for v in 'cache_node_abspath task_sigs node_deps raw_deps node_sigs'.split(): var = {} setattr(self, v, var) self.cache_dir_contents = {} self.all_task_gen = [] self.task_gen_cache_names = {} self.cache_sig_vars = {} self.log = None self.root = None self.srcnode = None self.bldnode = None # bind the build context to the nodes in use # this means better encapsulation and no build context singleton class node_class(Node.Node): pass self.node_class = node_class self.node_class.__module__ = "Node" self.node_class.__name__ = "Nodu" self.node_class.bld = self self.is_install = None def __copy__(self): "nodes are not supposed to be copied" raise Utils.WafError('build contexts are not supposed to be cloned') def load(self): "load the cache from the disk" try: env = Environment.Environment(os.path.join(self.cachedir, 'build.config.py')) except (IOError, OSError): pass else: if env['version'] < HEXVERSION: raise Utils.WafError('Version mismatch! reconfigure the project') for t in env['tools']: self.setup(**t) try: gc.disable() f = data = None Node.Nodu = self.node_class try: f = open(os.path.join(self.bdir, DBFILE), 'rb') except (IOError, EOFError): # handle missing file/empty file pass try: if f: data = cPickle.load(f) except AttributeError: # handle file of an old Waf version # that has an attribute which no longer exist # (e.g. 
AttributeError: 'module' object has no attribute 'BuildDTO') if Logs.verbose > 1: raise if data: for x in SAVED_ATTRS: setattr(self, x, data[x]) else: debug('build: Build cache loading failed') finally: if f: f.close() gc.enable() def save(self): "store the cache on disk, see self.load" gc.disable() self.root.__class__.bld = None # some people are very nervous with ctrl+c so we have to make a temporary file Node.Nodu = self.node_class db = os.path.join(self.bdir, DBFILE) file = open(db + '.tmp', 'wb') data = {} for x in SAVED_ATTRS: data[x] = getattr(self, x) cPickle.dump(data, file, -1) file.close() # do not use shutil.move try: os.unlink(db) except OSError: pass os.rename(db + '.tmp', db) self.root.__class__.bld = self gc.enable() # ======================================= # def clean(self): debug('build: clean called') # does not clean files created during the configuration precious = set([]) for env in self.all_envs.values(): for x in env[CFG_FILES]: node = self.srcnode.find_resource(x) if node: precious.add(node.id) def clean_rec(node): for x in list(node.childs.keys()): nd = node.childs[x] tp = nd.id & 3 if tp == Node.DIR: clean_rec(nd) elif tp == Node.BUILD: if nd.id in precious: continue for env in self.all_envs.values(): try: os.remove(nd.abspath(env)) except OSError: pass node.childs.__delitem__(x) clean_rec(self.srcnode) for v in 'node_sigs node_deps task_sigs raw_deps cache_node_abspath'.split(): setattr(self, v, {}) def compile(self): """The cache file is not written if nothing was build at all (build is up to date)""" debug('build: compile called') """ import cProfile, pstats cProfile.run("import Build\nBuild.bld.flush()", 'profi.txt') p = pstats.Stats('profi.txt') p.sort_stats('cumulative').print_stats(80) """ self.flush() #""" self.generator = Runner.Parallel(self, Options.options.jobs) def dw(on=True): if Options.options.progress_bar: if on: sys.stderr.write(Logs.colors.cursor_on) else: sys.stderr.write(Logs.colors.cursor_off) debug('build: executor 
starting') back = os.getcwd() os.chdir(self.bldnode.abspath()) try: try: dw(on=False) self.generator.start() except KeyboardInterrupt: dw() # if self.generator.processed != 1: TODO self.save() raise except Exception: dw() # do not store anything, for something bad happened raise else: dw() #if self.generator.processed != 1: TODO self.save() if self.generator.error: raise BuildError(self, self.task_manager.tasks_done) finally: os.chdir(back) def install(self): "this function is called for both install and uninstall" debug('build: install called') self.flush() # remove empty folders after uninstalling if self.is_install < 0: lst = [] for x in self.uninstall: dir = os.path.dirname(x) if not dir in lst: lst.append(dir) lst.sort() lst.reverse() nlst = [] for y in lst: x = y while len(x) > 4: if not x in nlst: nlst.append(x) x = os.path.dirname(x) nlst.sort() nlst.reverse() for x in nlst: try: os.rmdir(x) except OSError: pass def new_task_gen(self, *k, **kw): if self.task_gen_cache_names: self.task_gen_cache_names = {} kw['bld'] = self if len(k) == 0: ret = TaskGen.task_gen(*k, **kw) else: cls_name = k[0] try: cls = TaskGen.task_gen.classes[cls_name] except KeyError: raise Utils.WscriptError('%s is not a valid task generator -> %s' % (cls_name, [x for x in TaskGen.task_gen.classes])) ret = cls(*k, **kw) return ret def __call__(self, *k, **kw): if self.task_gen_cache_names: self.task_gen_cache_names = {} kw['bld'] = self return TaskGen.task_gen(*k, **kw) def load_envs(self): try: lst = Utils.listdir(self.cachedir) except OSError, e: if e.errno == errno.ENOENT: raise Utils.WafError('The project was not configured: run "waf configure" first!') else: raise if not lst: raise Utils.WafError('The cache directory is empty: reconfigure the project') for file in lst: if file.endswith(CACHE_SUFFIX): env = Environment.Environment(os.path.join(self.cachedir, file)) name = file[:-len(CACHE_SUFFIX)] self.all_envs[name] = env self.init_variants() for env in self.all_envs.values(): for f 
in env[CFG_FILES]: newnode = self.path.find_or_declare(f) try: hash = Utils.h_file(newnode.abspath(env)) except (IOError, AttributeError): error("cannot find "+f) hash = SIG_NIL self.node_sigs[env.variant()][newnode.id] = hash # TODO: hmmm, these nodes are removed from the tree when calling rescan() self.bldnode = self.root.find_dir(self.bldnode.abspath()) self.path = self.srcnode = self.root.find_dir(self.srcnode.abspath()) self.cwd = self.bldnode.abspath() def setup(self, tool, tooldir=None, funs=None): "setup tools for build process" if isinstance(tool, list): for i in tool: self.setup(i, tooldir) return if not tooldir: tooldir = Options.tooldir module = Utils.load_tool(tool, tooldir) if hasattr(module, "setup"): module.setup(self) def init_variants(self): debug('build: init variants') lstvariants = [] for env in self.all_envs.values(): if not env.variant() in lstvariants: lstvariants.append(env.variant()) self.lst_variants = lstvariants debug('build: list of variants is %r', lstvariants) for name in lstvariants+[0]: for v in 'node_sigs cache_node_abspath'.split(): var = getattr(self, v) if not name in var: var[name] = {} # ======================================= # # node and folder handling # this should be the main entry point def load_dirs(self, srcdir, blddir, load_cache=1): "this functions should be the start of everything" assert(os.path.isabs(srcdir)) assert(os.path.isabs(blddir)) self.cachedir = os.path.join(blddir, CACHE_DIR) if srcdir == blddir: raise Utils.WafError("build dir must be different from srcdir: %s <-> %s " % (srcdir, blddir)) self.bdir = blddir # try to load the cache file, if it does not exist, nothing happens self.load() if not self.root: Node.Nodu = self.node_class self.root = Node.Nodu('', None, Node.DIR) if not self.srcnode: self.srcnode = self.root.ensure_dir_node_from_path(srcdir) debug('build: srcnode is %s and srcdir %s', self.srcnode.name, srcdir) self.path = self.srcnode # create this build dir if necessary try: 
os.makedirs(blddir) except OSError: pass if not self.bldnode: self.bldnode = self.root.ensure_dir_node_from_path(blddir) self.init_variants() def rescan(self, src_dir_node): """ look the contents of a (folder)node and update its list of childs The intent is to perform the following steps * remove the nodes for the files that have disappeared * remove the signatures for the build files that have disappeared * cache the results of os.listdir * create the build folder equivalent (mkdir) for each variant src/bar -> build/default/src/bar, build/release/src/bar when a folder in the source directory is removed, we do not check recursively to remove the unused nodes. To do that, call 'waf clean' and build again. """ # do not rescan over and over again # TODO use a single variable in waf 1.6 if self.cache_scanned_folders.get(src_dir_node.id, None): return self.cache_scanned_folders[src_dir_node.id] = True # TODO remove in waf 1.6 if hasattr(self, 'repository'): self.repository(src_dir_node) if not src_dir_node.name and sys.platform == 'win32': # the root has no name, contains drive letters, and cannot be listed return # first, take the case of the source directory parent_path = src_dir_node.abspath() try: lst = set(Utils.listdir(parent_path)) except OSError: lst = set([]) # TODO move this at the bottom self.cache_dir_contents[src_dir_node.id] = lst # hash the existing source files, remove the others cache = self.node_sigs[0] for x in src_dir_node.childs.values(): if x.id & 3 != Node.FILE: continue if x.name in lst: try: cache[x.id] = Utils.h_file(x.abspath()) except IOError: raise Utils.WafError('The file %s is not readable or has become a dir' % x.abspath()) else: try: del cache[x.id] except KeyError: pass del src_dir_node.childs[x.name] # first obtain the differences between srcnode and src_dir_node h1 = self.srcnode.height() h2 = src_dir_node.height() lst = [] child = src_dir_node while h2 > h1: lst.append(child.name) child = child.parent h2 -= 1 lst.reverse() # list the 
files in the build dirs try: for variant in self.lst_variants: sub_path = os.path.join(self.bldnode.abspath(), variant , *lst) self.listdir_bld(src_dir_node, sub_path, variant) except OSError: # listdir failed, remove the build node signatures for all variants for node in src_dir_node.childs.values(): if node.id & 3 != Node.BUILD: continue for dct in self.node_sigs.values(): if node.id in dct: dct.__delitem__(node.id) # the policy is to avoid removing nodes representing directories src_dir_node.childs.__delitem__(node.name) for variant in self.lst_variants: sub_path = os.path.join(self.bldnode.abspath(), variant , *lst) try: os.makedirs(sub_path) except OSError: pass # ======================================= # def listdir_src(self, parent_node): """do not use, kept for compatibility""" pass def remove_node(self, node): """do not use, kept for compatibility""" pass def listdir_bld(self, parent_node, path, variant): """in this method we do not add timestamps but we remove them when the files no longer exist (file removed in the build dir)""" i_existing_nodes = [x for x in parent_node.childs.values() if x.id & 3 == Node.BUILD] lst = set(Utils.listdir(path)) node_names = set([x.name for x in i_existing_nodes]) remove_names = node_names - lst # remove the stamps of the build nodes that no longer exist on the filesystem ids_to_remove = [x.id for x in i_existing_nodes if x.name in remove_names] cache = self.node_sigs[variant] for nid in ids_to_remove: if nid in cache: cache.__delitem__(nid) def get_env(self): return self.env_of_name('default') def set_env(self, name, val): self.all_envs[name] = val env = property(get_env, set_env) def add_manual_dependency(self, path, value): if isinstance(path, Node.Node): node = path elif os.path.isabs(path): node = self.root.find_resource(path) else: node = self.path.find_resource(path) self.deps_man[node.id].append(value) def launch_node(self): """return the launch directory as a node""" # p_ln is kind of private, but public in case 
if try: return self.p_ln except AttributeError: self.p_ln = self.root.find_dir(Options.launch_dir) return self.p_ln def glob(self, pattern, relative=True): "files matching the pattern, seen from the current folder" path = self.path.abspath() files = [self.root.find_resource(x) for x in glob.glob(path+os.sep+pattern)] if relative: files = [x.path_to_parent(self.path) for x in files if x] else: files = [x.abspath() for x in files if x] return files ## the following methods are candidates for the stable apis ## def add_group(self, *k): self.task_manager.add_group(*k) def set_group(self, *k, **kw): self.task_manager.set_group(*k, **kw) def hash_env_vars(self, env, vars_lst): """hash environment variables ['CXX', ..] -> [env['CXX'], ..] -> md5()""" # ccroot objects use the same environment for building the .o at once # the same environment and the same variables are used idx = str(id(env)) + str(vars_lst) try: return self.cache_sig_vars[idx] except KeyError: pass lst = [str(env[a]) for a in vars_lst] ret = Utils.h_list(lst) debug('envhash: %r %r', ret, lst) # next time self.cache_sig_vars[idx] = ret return ret def name_to_obj(self, name, env): """retrieve a task generator from its name or its target name remember that names must be unique""" cache = self.task_gen_cache_names if not cache: # create the index lazily for x in self.all_task_gen: vt = x.env.variant() + '_' if x.name: cache[vt + x.name] = x else: if isinstance(x.target, str): target = x.target else: target = ' '.join(x.target) v = vt + target if not cache.get(v, None): cache[v] = x return cache.get(env.variant() + '_' + name, None) def flush(self, all=1): """tell the task generators to create the tasks""" self.ini = datetime.datetime.now() # force the initialization of the mapping name->object in flush # name_to_obj can be used in userland scripts, in that case beware of incomplete mapping self.task_gen_cache_names = {} self.name_to_obj('', self.env) debug('build: delayed operation TaskGen.flush() called') if 
Options.options.compile_targets: debug('task_gen: posting objects %r listed in compile_targets', Options.options.compile_targets) mana = self.task_manager to_post = [] min_grp = 0 # ensure the target names exist, fail before any post() target_objects = Utils.DefaultDict(list) for target_name in Options.options.compile_targets.split(','): # trim target_name (handle cases when the user added spaces to targets) target_name = target_name.strip() for env in self.all_envs.values(): tg = self.name_to_obj(target_name, env) if tg: target_objects[target_name].append(tg) m = mana.group_idx(tg) if m > min_grp: min_grp = m to_post = [tg] elif m == min_grp: to_post.append(tg) if not target_name in target_objects and all: raise Utils.WafError("target '%s' does not exist" % target_name) debug('group: Forcing up to group %s for target %s', mana.group_name(min_grp), Options.options.compile_targets) # post all the task generators in previous groups for i in xrange(len(mana.groups)): mana.current_group = i if i == min_grp: break g = mana.groups[i] debug('group: Forcing group %s', mana.group_name(g)) for t in g.tasks_gen: debug('group: Posting %s', t.name or t.target) t.post() # then post the task generators listed in compile_targets in the last group for t in to_post: t.post() else: debug('task_gen: posting objects (normal)') ln = self.launch_node() # if the build is started from the build directory, do as if it was started from the top-level # for the pretty-printing (Node.py), the two lines below cannot be moved to Build::launch_node if ln.is_child_of(self.bldnode) or not ln.is_child_of(self.srcnode): ln = self.srcnode # if the project file is located under the source directory, build all targets by default # else 'waf configure build' does nothing proj_node = self.root.find_dir(os.path.split(Utils.g_module.root_path)[0]) if proj_node.id != self.srcnode.id: ln = self.srcnode for i in xrange(len(self.task_manager.groups)): g = self.task_manager.groups[i] 
self.task_manager.current_group = i if Logs.verbose: groups = [x for x in self.task_manager.groups_names if id(self.task_manager.groups_names[x]) == id(g)] name = groups and groups[0] or 'unnamed' Logs.debug('group: group', name) for tg in g.tasks_gen: if not tg.path.is_child_of(ln): continue if Logs.verbose: Logs.debug('group: %s' % tg) tg.post() def env_of_name(self, name): try: return self.all_envs[name] except KeyError: error('no such environment: '+name) return None def progress_line(self, state, total, col1, col2): n = len(str(total)) Utils.rot_idx += 1 ind = Utils.rot_chr[Utils.rot_idx % 4] ini = self.ini pc = (100.*state)/total eta = Utils.get_elapsed_time(ini) fs = "[%%%dd/%%%dd][%%s%%2d%%%%%%s][%s][" % (n, n, ind) left = fs % (state, total, col1, pc, col2) right = '][%s%s%s]' % (col1, eta, col2) cols = Utils.get_term_cols() - len(left) - len(right) + 2*len(col1) + 2*len(col2) if cols < 7: cols = 7 ratio = int((cols*state)/total) - 1 bar = ('='*ratio+'>').ljust(cols) msg = Utils.indicator % (left, bar, right) return msg # do_install is not used anywhere def do_install(self, src, tgt, chmod=O644): """returns true if the file was effectively installed or uninstalled, false otherwise""" if self.is_install > 0: if not Options.options.force: # check if the file is already there to avoid a copy try: st1 = os.stat(tgt) st2 = os.stat(src) except OSError: pass else: # same size and identical timestamps -> make no copy if st1.st_mtime >= st2.st_mtime and st1.st_size == st2.st_size: return False srclbl = src.replace(self.srcnode.abspath(None)+os.sep, '') info("* installing %s as %s" % (srclbl, tgt)) # following is for shared libs and stale inodes (-_-) try: os.remove(tgt) except OSError: pass try: shutil.copy2(src, tgt) os.chmod(tgt, chmod) except IOError: try: os.stat(src) except (OSError, IOError): error('File %r does not exist' % src) raise Utils.WafError('Could not install the file %r' % tgt) return True elif self.is_install < 0: info("* uninstalling %s" % tgt) 
self.uninstall.append(tgt) try: os.remove(tgt) except OSError, e: if e.errno != errno.ENOENT: if not getattr(self, 'uninstall_error', None): self.uninstall_error = True Logs.warn('build: some files could not be uninstalled (retry with -vv to list them)') if Logs.verbose > 1: Logs.warn('could not remove %s (error code %r)' % (e.filename, e.errno)) return True red = re.compile(r"^([A-Za-z]:)?[/\\\\]*") def get_install_path(self, path, env=None): "installation path prefixed by the destdir, the variables like in '${PREFIX}/bin' are substituted" if not env: env = self.env destdir = env.get_destdir() path = path.replace('/', os.sep) destpath = Utils.subst_vars(path, env) if destdir: destpath = os.path.join(destdir, self.red.sub('', destpath)) return destpath def install_dir(self, path, env=None): """ create empty folders for the installation (very rarely used) """ if env: assert isinstance(env, Environment.Environment), "invalid parameter" else: env = self.env if not path: return [] destpath = self.get_install_path(path, env) if self.is_install > 0: info('* creating %s' % destpath) Utils.check_dir(destpath) elif self.is_install < 0: info('* removing %s' % destpath) self.uninstall.append(destpath + '/xxx') # yes, ugly def install_files(self, path, files, env=None, chmod=O644, relative_trick=False, cwd=None): """To install files only after they have been built, put the calls in a method named post_build on the top-level wscript The files must be a list and contain paths as strings or as Nodes The relative_trick flag can be set to install folders, use bld.path.ant_glob() with it """ if env: assert isinstance(env, Environment.Environment), "invalid parameter" else: env = self.env if not path: return [] if not cwd: cwd = self.path if isinstance(files, str) and '*' in files: gl = cwd.abspath() + os.sep + files lst = glob.glob(gl) else: lst = Utils.to_list(files) if not getattr(lst, '__iter__', False): lst = [lst] destpath = self.get_install_path(path, env) 
Utils.check_dir(destpath) installed_files = [] for filename in lst: if isinstance(filename, str) and os.path.isabs(filename): alst = Utils.split_path(filename) destfile = os.path.join(destpath, alst[-1]) else: if isinstance(filename, Node.Node): nd = filename else: nd = cwd.find_resource(filename) if not nd: raise Utils.WafError("Unable to install the file %r (not found in %s)" % (filename, cwd)) if relative_trick: destfile = os.path.join(destpath, filename) Utils.check_dir(os.path.dirname(destfile)) else: destfile = os.path.join(destpath, nd.name) filename = nd.abspath(env) if self.do_install(filename, destfile, chmod): installed_files.append(destfile) return installed_files def install_as(self, path, srcfile, env=None, chmod=O644, cwd=None): """ srcfile may be a string or a Node representing the file to install returns True if the file was effectively installed, False otherwise """ if env: assert isinstance(env, Environment.Environment), "invalid parameter" else: env = self.env if not path: raise Utils.WafError("where do you want to install %r? (%r?)" % (srcfile, path)) if not cwd: cwd = self.path destpath = self.get_install_path(path, env) dir, name = os.path.split(destpath) Utils.check_dir(dir) # the source path if isinstance(srcfile, Node.Node): src = srcfile.abspath(env) else: src = srcfile if not os.path.isabs(srcfile): node = cwd.find_resource(srcfile) if not node: raise Utils.WafError("Unable to install the file %r (not found in %s)" % (srcfile, cwd)) src = node.abspath(env) return self.do_install(src, destpath, chmod) def symlink_as(self, path, src, env=None, cwd=None): """example: bld.symlink_as('${PREFIX}/lib/libfoo.so', 'libfoo.so.1.2.3') """ if sys.platform == 'win32': # well, this *cannot* work return if not path: raise Utils.WafError("where do you want to install %r? 
(%r?)" % (src, path)) tgt = self.get_install_path(path, env) dir, name = os.path.split(tgt) Utils.check_dir(dir) if self.is_install > 0: link = False if not os.path.islink(tgt): link = True elif os.readlink(tgt) != src: link = True if link: try: os.remove(tgt) except OSError: pass info('* symlink %s (-> %s)' % (tgt, src)) os.symlink(src, tgt) return 0 else: # UNINSTALL try: info('* removing %s' % (tgt)) os.remove(tgt) return 0 except OSError: return 1 def exec_command(self, cmd, **kw): # 'runner' zone is printed out for waf -v, see wafadmin/Options.py debug('runner: system command -> %s', cmd) if self.log: self.log.write('%s\n' % cmd) kw['log'] = self.log try: if not kw.get('cwd', None): kw['cwd'] = self.cwd except AttributeError: self.cwd = kw['cwd'] = self.bldnode.abspath() return Utils.exec_command(cmd, **kw) def printout(self, s): f = self.log or sys.stderr f.write(s) f.flush() def add_subdirs(self, dirs): self.recurse(dirs, 'build') def pre_recurse(self, name_or_mod, path, nexdir): if not hasattr(self, 'oldpath'): self.oldpath = [] self.oldpath.append(self.path) self.path = self.root.find_dir(nexdir) return {'bld': self, 'ctx': self} def post_recurse(self, name_or_mod, path, nexdir): self.path = self.oldpath.pop() ###### user-defined behaviour def pre_build(self): if hasattr(self, 'pre_funs'): for m in self.pre_funs: m(self) def post_build(self): if hasattr(self, 'post_funs'): for m in self.post_funs: m(self) def add_pre_fun(self, meth): try: self.pre_funs.append(meth) except AttributeError: self.pre_funs = [meth] def add_post_fun(self, meth): try: self.post_funs.append(meth) except AttributeError: self.post_funs = [meth] def use_the_magic(self): Task.algotype = Task.MAXPARALLEL Task.file_deps = Task.extract_deps self.magic = True install_as = group_method(install_as) install_files = group_method(install_files) symlink_as = group_method(symlink_as) 
ntdb-1.0/buildtools/wafadmin/Configure.py000066400000000000000000000276221224151530700205530ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2008 (ita) """ Configuration system A configuration instance is created when "waf configure" is called, it is used to: * create data dictionaries (Environment instances) * store the list of modules to import The old model (copied from Scons) was to store logic (mapping file extensions to functions) along with the data. In Waf a way was found to separate that logic by adding an indirection layer (storing the names in the Environment instances) In the new model, the logic is more object-oriented, and the user scripts provide the logic. The data files (Environments) must contain configuration data only (flags, ..). Note: the c/c++ related code is in the module config_c """ import os, shlex, sys, time try: import cPickle except ImportError: import pickle as cPickle import Environment, Utils, Options, Logs from Logs import warn from Constants import * try: from urllib import request except: from urllib import urlopen else: urlopen = request.urlopen conf_template = '''# project %(app)s configured on %(now)s by # waf %(wafver)s (abi %(abi)s, python %(pyver)x on %(systype)s) # using %(args)s # ''' class ConfigurationError(Utils.WscriptError): pass autoconfig = False "reconfigure the project automatically" def find_file(filename, path_list): """find a file in a list of paths @param filename: name of the file to search for @param path_list: list of directories to search @return: the first occurrence filename or '' if filename could not be found """ for directory in Utils.to_list(path_list): if os.path.exists(os.path.join(directory, filename)): return directory return '' def find_program_impl(env, filename, path_list=[], var=None, environ=None): """find a program in folders path_lst, and sets env[var] @param env: environment @param filename: name of the program to search for @param path_list: list 
of directories to search for filename @param var: environment value to be checked for in env or os.environ @return: either the value that is referenced with [var] in env or os.environ or the first occurrence filename or '' if filename could not be found """ if not environ: environ = os.environ try: path_list = path_list.split() except AttributeError: pass if var: if env[var]: return env[var] if var in environ: env[var] = environ[var] if not path_list: path_list = environ.get('PATH', '').split(os.pathsep) ext = (Options.platform == 'win32') and '.exe,.com,.bat,.cmd' or '' for y in [filename+x for x in ext.split(',')]: for directory in path_list: x = os.path.join(directory, y) if os.path.isfile(x): if var: env[var] = x return x return '' class ConfigurationContext(Utils.Context): tests = {} error_handlers = [] def __init__(self, env=None, blddir='', srcdir=''): self.env = None self.envname = '' self.environ = dict(os.environ) self.line_just = 40 self.blddir = blddir self.srcdir = srcdir self.all_envs = {} # curdir: necessary for recursion self.cwd = self.curdir = os.getcwd() self.tools = [] # tools loaded in the configuration, and that will be loaded when building self.setenv(DEFAULT) self.lastprog = '' self.hash = 0 self.files = [] self.tool_cache = [] if self.blddir: self.post_init() def post_init(self): self.cachedir = os.path.join(self.blddir, CACHE_DIR) path = os.path.join(self.blddir, WAF_CONFIG_LOG) try: os.unlink(path) except (OSError, IOError): pass try: self.log = open(path, 'w') except (OSError, IOError): self.fatal('could not open %r for writing' % path) app = Utils.g_module.APPNAME if app: ver = getattr(Utils.g_module, 'VERSION', '') if ver: app = "%s (%s)" % (app, ver) now = time.ctime() pyver = sys.hexversion systype = sys.platform args = " ".join(sys.argv) wafver = WAFVERSION abi = ABI self.log.write(conf_template % vars()) def __del__(self): """cleanup function: close config.log""" # may be ran by the gc, not always after initialization if 
hasattr(self, 'log') and self.log: self.log.close() def fatal(self, msg): raise ConfigurationError(msg) def check_tool(self, input, tooldir=None, funs=None): "load a waf tool" tools = Utils.to_list(input) if tooldir: tooldir = Utils.to_list(tooldir) for tool in tools: tool = tool.replace('++', 'xx') if tool == 'java': tool = 'javaw' if tool.lower() == 'unittest': tool = 'unittestw' # avoid loading the same tool more than once with the same functions # used by composite projects mag = (tool, id(self.env), funs) if mag in self.tool_cache: continue self.tool_cache.append(mag) module = None try: module = Utils.load_tool(tool, tooldir) except Exception, e: ex = e if Options.options.download: _3rdparty = os.path.normpath(Options.tooldir[0] + os.sep + '..' + os.sep + '3rdparty') # try to download the tool from the repository then # the default is set to false for x in Utils.to_list(Options.remote_repo): for sub in ['branches/waf-%s/wafadmin/3rdparty' % WAFVERSION, 'trunk/wafadmin/3rdparty']: url = '/'.join((x, sub, tool + '.py')) try: web = urlopen(url) if web.getcode() != 200: continue except Exception, e: # on python3 urlopen throws an exception continue else: loc = None try: loc = open(_3rdparty + os.sep + tool + '.py', 'wb') loc.write(web.read()) web.close() finally: if loc: loc.close() Logs.warn('downloaded %s from %s' % (tool, url)) try: module = Utils.load_tool(tool, tooldir) except: Logs.warn('module %s from %s is unusable' % (tool, url)) try: os.unlink(_3rdparty + os.sep + tool + '.py') except: pass continue else: break if not module: Logs.error('Could not load the tool %r or download a suitable replacement from the repository (sys.path %r)\n%s' % (tool, sys.path, e)) raise ex else: Logs.error('Could not load the tool %r in %r (try the --download option?):\n%s' % (tool, sys.path, e)) raise ex if funs is not None: self.eval_rules(funs) else: func = getattr(module, 'detect', None) if func: if type(func) is type(find_file): func(self) else: self.eval_rules(func) 
self.tools.append({'tool':tool, 'tooldir':tooldir, 'funs':funs}) def sub_config(self, k): "executes the configure function of a wscript module" self.recurse(k, name='configure') def pre_recurse(self, name_or_mod, path, nexdir): return {'conf': self, 'ctx': self} def post_recurse(self, name_or_mod, path, nexdir): if not autoconfig: return self.hash = hash((self.hash, getattr(name_or_mod, 'waf_hash_val', name_or_mod))) self.files.append(path) def store(self, file=''): "save the config results into the cache file" if not os.path.isdir(self.cachedir): os.makedirs(self.cachedir) if not file: file = open(os.path.join(self.cachedir, 'build.config.py'), 'w') file.write('version = 0x%x\n' % HEXVERSION) file.write('tools = %r\n' % self.tools) file.close() if not self.all_envs: self.fatal('nothing to store in the configuration context!') for key in self.all_envs: tmpenv = self.all_envs[key] tmpenv.store(os.path.join(self.cachedir, key + CACHE_SUFFIX)) def set_env_name(self, name, env): "add a new environment called name" self.all_envs[name] = env return env def retrieve(self, name, fromenv=None): "retrieve an environment called name" try: env = self.all_envs[name] except KeyError: env = Environment.Environment() env['PREFIX'] = os.path.abspath(os.path.expanduser(Options.options.prefix)) self.all_envs[name] = env else: if fromenv: warn("The environment %s may have been configured already" % name) return env def setenv(self, name): "enable the environment called name" self.env = self.retrieve(name) self.envname = name def add_os_flags(self, var, dest=None): # do not use 'get' to make certain the variable is not defined try: self.env.append_value(dest or var, Utils.to_list(self.environ[var])) except KeyError: pass def check_message_1(self, sr): self.line_just = max(self.line_just, len(sr)) for x in ('\n', self.line_just * '-', '\n', sr, '\n'): self.log.write(x) Utils.pprint('NORMAL', "%s :" % sr.ljust(self.line_just), sep='') def check_message_2(self, sr, color='GREEN'): 
self.log.write(sr) self.log.write('\n') Utils.pprint(color, sr) def check_message(self, th, msg, state, option=''): sr = 'Checking for %s %s' % (th, msg) self.check_message_1(sr) p = self.check_message_2 if state: p('ok ' + str(option)) else: p('not found', 'YELLOW') # FIXME remove in waf 1.6 # the parameter 'option' is not used (kept for compatibility) def check_message_custom(self, th, msg, custom, option='', color='PINK'): sr = 'Checking for %s %s' % (th, msg) self.check_message_1(sr) self.check_message_2(custom, color) def msg(self, msg, result, color=None): """Prints a configuration message 'Checking for xxx: ok'""" self.start_msg('Checking for ' + msg) if not isinstance(color, str): color = result and 'GREEN' or 'YELLOW' self.end_msg(result, color) def start_msg(self, msg): try: if self.in_msg: return except: self.in_msg = 0 self.in_msg += 1 self.line_just = max(self.line_just, len(msg)) for x in ('\n', self.line_just * '-', '\n', msg, '\n'): self.log.write(x) Utils.pprint('NORMAL', "%s :" % msg.ljust(self.line_just), sep='') def end_msg(self, result, color): self.in_msg -= 1 if self.in_msg: return if not color: color = 'GREEN' if result == True: msg = 'ok' elif result == False: msg = 'not found' color = 'YELLOW' else: msg = str(result) self.log.write(msg) self.log.write('\n') Utils.pprint(color, msg) def find_program(self, filename, path_list=[], var=None, mandatory=False): "wrapper that adds a configuration message" ret = None if var: if self.env[var]: ret = self.env[var] elif var in os.environ: ret = os.environ[var] if not isinstance(filename, list): filename = [filename] if not ret: for x in filename: ret = find_program_impl(self.env, x, path_list, var, environ=self.environ) if ret: break self.check_message_1('Checking for program %s' % ' or '.join(filename)) self.log.write(' find program=%r paths=%r var=%r\n -> %r\n' % (filename, path_list, var, ret)) if ret: Utils.pprint('GREEN', str(ret)) else: Utils.pprint('YELLOW', 'not found') if mandatory: 
self.fatal('The program %r is required' % filename) if var: self.env[var] = ret return ret def cmd_to_list(self, cmd): "commands may be written in pseudo shell like 'ccache g++'" if isinstance(cmd, str) and cmd.find(' '): try: os.stat(cmd) except OSError: return shlex.split(cmd) else: return [cmd] return cmd def __getattr__(self, name): r = self.__class__.__dict__.get(name, None) if r: return r if name and name.startswith('require_'): for k in ['check_', 'find_']: n = name.replace('require_', k) ret = self.__class__.__dict__.get(n, None) if ret: def run(*k, **kw): r = ret(self, *k, **kw) if not r: self.fatal('requirement failure') return r return run self.fatal('No such method %r' % name) def eval_rules(self, rules): self.rules = Utils.to_list(rules) for x in self.rules: f = getattr(self, x) if not f: self.fatal("No such method '%s'." % x) try: f() except Exception, e: ret = self.err_handler(x, e) if ret == BREAK: break elif ret == CONTINUE: continue else: self.fatal(e) def err_handler(self, fun, error): pass def conf(f): "decorator: attach new configuration functions" setattr(ConfigurationContext, f.__name__, f) return f def conftest(f): "decorator: attach new configuration tests (registered as strings)" ConfigurationContext.tests[f.__name__] = f return conf(f) ntdb-1.0/buildtools/wafadmin/Constants.py000066400000000000000000000024341224151530700206000ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Yinon dot me gmail 2008 """ these constants are somewhat public, try not to mess them maintainer: the version number is updated from the top-level wscript file """ # do not touch these three lines, they are updated automatically HEXVERSION=0x105019 WAFVERSION="1.5.19" WAFREVISION = "9709M" ABI = 7 # permissions O644 = 420 O755 = 493 MAXJOBS = 99999999 CACHE_DIR = 'c4che' CACHE_SUFFIX = '.cache.py' DBFILE = '.wafpickle-%d' % ABI WSCRIPT_FILE = 'wscript' WSCRIPT_BUILD_FILE = 'wscript_build' WAF_CONFIG_LOG = 'config.log' WAF_CONFIG_H = 'config.h' 
SIG_NIL = 'iluvcuteoverload' VARIANT = '_VARIANT_' DEFAULT = 'default' SRCDIR = 'srcdir' BLDDIR = 'blddir' APPNAME = 'APPNAME' VERSION = 'VERSION' DEFINES = 'defines' UNDEFINED = () BREAK = "break" CONTINUE = "continue" # task scheduler options JOBCONTROL = "JOBCONTROL" MAXPARALLEL = "MAXPARALLEL" NORMAL = "NORMAL" # task state NOT_RUN = 0 MISSING = 1 CRASHED = 2 EXCEPTION = 3 SKIPPED = 8 SUCCESS = 9 ASK_LATER = -1 SKIP_ME = -2 RUN_ME = -3 LOG_FORMAT = "%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s" HOUR_FORMAT = "%H:%M:%S" TEST_OK = True CFG_FILES = 'cfg_files' # positive '->' install # negative '<-' uninstall INSTALL = 1337 UNINSTALL = -1337 ntdb-1.0/buildtools/wafadmin/Environment.py000066400000000000000000000116651224151530700211360ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005 (ita) """Environment representation There is one gotcha: getitem returns [] if the contents evals to False This means env['foo'] = {}; print env['foo'] will print [] not {} """ import os, copy, re import Logs, Options, Utils from Constants import * re_imp = re.compile('^(#)*?([^#=]*?)\ =\ (.*?)$', re.M) class Environment(object): """A safe-to-use dictionary, but do not attach functions to it please (break cPickle) An environment instance can be stored into a file and loaded easily """ __slots__ = ("table", "parent") def __init__(self, filename=None): self.table = {} #self.parent = None if filename: self.load(filename) def __contains__(self, key): if key in self.table: return True try: return self.parent.__contains__(key) except AttributeError: return False # parent may not exist def __str__(self): keys = set() cur = self while cur: keys.update(cur.table.keys()) cur = getattr(cur, 'parent', None) keys = list(keys) keys.sort() return "\n".join(["%r %r" % (x, self.__getitem__(x)) for x in keys]) def __getitem__(self, key): try: while 1: x = self.table.get(key, None) if not x is None: return x self = self.parent except AttributeError: return [] def 
__setitem__(self, key, value): self.table[key] = value def __delitem__(self, key): del self.table[key] def pop(self, key, *args): if len(args): return self.table.pop(key, *args) return self.table.pop(key) def set_variant(self, name): self.table[VARIANT] = name def variant(self): try: while 1: x = self.table.get(VARIANT, None) if not x is None: return x self = self.parent except AttributeError: return DEFAULT def copy(self): # TODO waf 1.6 rename this method derive, #368 newenv = Environment() newenv.parent = self return newenv def detach(self): """TODO try it modifying the original env will not change the copy""" tbl = self.get_merged_dict() try: delattr(self, 'parent') except AttributeError: pass else: keys = tbl.keys() for x in keys: tbl[x] = copy.deepcopy(tbl[x]) self.table = tbl def get_flat(self, key): s = self[key] if isinstance(s, str): return s return ' '.join(s) def _get_list_value_for_modification(self, key): """Gets a value that must be a list for further modification. The list may be modified inplace and there is no need to "self.table[var] = value" afterwards. 
""" try: value = self.table[key] except KeyError: try: value = self.parent[key] except AttributeError: value = [] if isinstance(value, list): value = value[:] else: value = [value] else: if not isinstance(value, list): value = [value] self.table[key] = value return value def append_value(self, var, value): current_value = self._get_list_value_for_modification(var) if isinstance(value, list): current_value.extend(value) else: current_value.append(value) def prepend_value(self, var, value): current_value = self._get_list_value_for_modification(var) if isinstance(value, list): current_value = value + current_value # a new list: update the dictionary entry self.table[var] = current_value else: current_value.insert(0, value) # prepend unique would be ambiguous def append_unique(self, var, value): current_value = self._get_list_value_for_modification(var) if isinstance(value, list): for value_item in value: if value_item not in current_value: current_value.append(value_item) else: if value not in current_value: current_value.append(value) def get_merged_dict(self): """compute a merged table""" table_list = [] env = self while 1: table_list.insert(0, env.table) try: env = env.parent except AttributeError: break merged_table = {} for table in table_list: merged_table.update(table) return merged_table def store(self, filename): "Write the variables into a file" file = open(filename, 'w') merged_table = self.get_merged_dict() keys = list(merged_table.keys()) keys.sort() for k in keys: file.write('%s = %r\n' % (k, merged_table[k])) file.close() def load(self, filename): "Retrieve the variables from a file" tbl = self.table code = Utils.readf(filename) for m in re_imp.finditer(code): g = m.group tbl[g(2)] = eval(g(3)) Logs.debug('env: %s', self.table) def get_destdir(self): "return the destdir, useful for installing" if self.__getitem__('NOINSTALL'): return '' return Options.options.destdir def update(self, d): for k, v in d.iteritems(): self[k] = v def __getattr__(self, 
name): if name in self.__slots__: return object.__getattr__(self, name) else: return self[name] def __setattr__(self, name, value): if name in self.__slots__: object.__setattr__(self, name, value) else: self[name] = value def __delattr__(self, name): if name in self.__slots__: object.__delattr__(self, name) else: del self[name] ntdb-1.0/buildtools/wafadmin/Logs.py000066400000000000000000000054741224151530700175370ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005 (ita) import ansiterm import os, re, logging, traceback, sys from Constants import * zones = '' verbose = 0 colors_lst = { 'USE' : True, 'BOLD' :'\x1b[01;1m', 'RED' :'\x1b[01;31m', 'GREEN' :'\x1b[32m', 'YELLOW':'\x1b[33m', 'PINK' :'\x1b[35m', 'BLUE' :'\x1b[01;34m', 'CYAN' :'\x1b[36m', 'NORMAL':'\x1b[0m', 'cursor_on' :'\x1b[?25h', 'cursor_off' :'\x1b[?25l', } got_tty = False term = os.environ.get('TERM', 'dumb') if not term in ['dumb', 'emacs']: try: got_tty = sys.stderr.isatty() or (sys.platform == 'win32' and term in ['xterm', 'msys']) except AttributeError: pass import Utils if not got_tty or 'NOCOLOR' in os.environ: colors_lst['USE'] = False # test #if sys.platform == 'win32': # colors_lst['USE'] = True def get_color(cl): if not colors_lst['USE']: return '' return colors_lst.get(cl, '') class foo(object): def __getattr__(self, a): return get_color(a) def __call__(self, a): return get_color(a) colors = foo() re_log = re.compile(r'(\w+): (.*)', re.M) class log_filter(logging.Filter): def __init__(self, name=None): pass def filter(self, rec): rec.c1 = colors.PINK rec.c2 = colors.NORMAL rec.zone = rec.module if rec.levelno >= logging.INFO: if rec.levelno >= logging.ERROR: rec.c1 = colors.RED elif rec.levelno >= logging.WARNING: rec.c1 = colors.YELLOW else: rec.c1 = colors.GREEN return True zone = '' m = re_log.match(rec.msg) if m: zone = rec.zone = m.group(1) rec.msg = m.group(2) if zones: return getattr(rec, 'zone', '') in zones or '*' in zones elif not verbose > 2: 
return False return True class formatter(logging.Formatter): def __init__(self): logging.Formatter.__init__(self, LOG_FORMAT, HOUR_FORMAT) def format(self, rec): if rec.levelno >= logging.WARNING or rec.levelno == logging.INFO: try: return '%s%s%s' % (rec.c1, rec.msg.decode('utf-8'), rec.c2) except: return rec.c1+rec.msg+rec.c2 return logging.Formatter.format(self, rec) def debug(*k, **kw): if verbose: k = list(k) k[0] = k[0].replace('\n', ' ') logging.debug(*k, **kw) def error(*k, **kw): logging.error(*k, **kw) if verbose > 1: if isinstance(k[0], Utils.WafError): st = k[0].stack else: st = traceback.extract_stack() if st: st = st[:-1] buf = [] for filename, lineno, name, line in st: buf.append(' File "%s", line %d, in %s' % (filename, lineno, name)) if line: buf.append(' %s' % line.strip()) if buf: logging.error("\n".join(buf)) warn = logging.warn info = logging.info def init_log(): log = logging.getLogger() log.handlers = [] log.filters = [] hdlr = logging.StreamHandler() hdlr.setFormatter(formatter()) log.addHandler(hdlr) log.addFilter(log_filter()) log.setLevel(logging.DEBUG) # may be initialized more than once init_log() ntdb-1.0/buildtools/wafadmin/Node.py000066400000000000000000000444371224151530700175220ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005 (ita) """ Node: filesystem structure, contains lists of nodes IMPORTANT: 1. Each file/folder is represented by exactly one node. 2. Most would-be class properties are stored in Build: nodes to depend on, signature, flags, .. unused class members increase the .wafpickle file size sensibly with lots of objects. 3. The build is launched from the top of the build dir (for example, in _build_/). 4. Node should not be instantiated directly. Each instance of Build.BuildContext has a Node subclass. 
(aka: 'Nodu', see BuildContext initializer) The BuildContext is referenced here as self.__class__.bld Its Node class is referenced here as self.__class__ The public and advertised apis are the following: ${TGT} -> dir/to/file.ext ${TGT[0].base()} -> dir/to/file ${TGT[0].dir(env)} -> dir/to ${TGT[0].file()} -> file.ext ${TGT[0].file_base()} -> file ${TGT[0].suffix()} -> .ext ${TGT[0].abspath(env)} -> /path/to/dir/to/file.ext """ import os, sys, fnmatch, re, stat import Utils, Constants UNDEFINED = 0 DIR = 1 FILE = 2 BUILD = 3 type_to_string = {UNDEFINED: "unk", DIR: "dir", FILE: "src", BUILD: "bld"} # These fnmatch expressions are used by default to prune the directory tree # while doing the recursive traversal in the find_iter method of the Node class. prune_pats = '.git .bzr .hg .svn _MTN _darcs CVS SCCS'.split() # These fnmatch expressions are used by default to exclude files and dirs # while doing the recursive traversal in the find_iter method of the Node class. exclude_pats = prune_pats + '*~ #*# .#* %*% ._* .gitignore .cvsignore vssver.scc .DS_Store'.split() # These Utils.jar_regexp expressions are used by default to exclude files and dirs and also prune the directory tree # while doing the recursive traversal in the ant_glob method of the Node class. 
exclude_regs = ''' **/*~ **/#*# **/.#* **/%*% **/._* **/CVS **/CVS/** **/.cvsignore **/SCCS **/SCCS/** **/vssver.scc **/.svn **/.svn/** **/.git **/.git/** **/.gitignore **/.bzr **/.bzr/** **/.hg **/.hg/** **/_MTN **/_MTN/** **/_darcs **/_darcs/** **/.DS_Store''' class Node(object): __slots__ = ("name", "parent", "id", "childs") def __init__(self, name, parent, node_type = UNDEFINED): self.name = name self.parent = parent # assumption: one build object at a time self.__class__.bld.id_nodes += 4 self.id = self.__class__.bld.id_nodes + node_type if node_type == DIR: self.childs = {} # We do not want to add another type attribute (memory) # use the id to find out: type = id & 3 # for setting: new type = type + x - type & 3 if parent and name in parent.childs: raise Utils.WafError('node %s exists in the parent files %r already' % (name, parent)) if parent: parent.childs[name] = self def __setstate__(self, data): if len(data) == 4: (self.parent, self.name, self.id, self.childs) = data else: (self.parent, self.name, self.id) = data def __getstate__(self): if getattr(self, 'childs', None) is None: return (self.parent, self.name, self.id) else: return (self.parent, self.name, self.id, self.childs) def __str__(self): if not self.parent: return '' return "%s://%s" % (type_to_string[self.id & 3], self.abspath()) def __repr__(self): return self.__str__() def __hash__(self): "expensive, make certain it is not used" raise Utils.WafError('nodes, you are doing it wrong') def __copy__(self): "nodes are not supposed to be copied" raise Utils.WafError('nodes are not supposed to be cloned') def get_type(self): return self.id & 3 def set_type(self, t): "dangerous, you are not supposed to use this" self.id = self.id + t - self.id & 3 def dirs(self): return [x for x in self.childs.values() if x.id & 3 == DIR] def files(self): return [x for x in self.childs.values() if x.id & 3 == FILE] def get_dir(self, name, default=None): node = self.childs.get(name, None) if not node or node.id & 3 != 
DIR: return default return node def get_file(self, name, default=None): node = self.childs.get(name, None) if not node or node.id & 3 != FILE: return default return node def get_build(self, name, default=None): node = self.childs.get(name, None) if not node or node.id & 3 != BUILD: return default return node def find_resource(self, lst): "Find an existing input file: either a build node declared previously or a source node" if isinstance(lst, str): lst = Utils.split_path(lst) if len(lst) == 1: parent = self else: parent = self.find_dir(lst[:-1]) if not parent: return None self.__class__.bld.rescan(parent) name = lst[-1] node = parent.childs.get(name, None) if node: tp = node.id & 3 if tp == FILE or tp == BUILD: return node else: return None tree = self.__class__.bld if not name in tree.cache_dir_contents[parent.id]: return None path = parent.abspath() + os.sep + name try: st = Utils.h_file(path) except IOError: return None child = self.__class__(name, parent, FILE) tree.node_sigs[0][child.id] = st return child def find_or_declare(self, lst): "Used for declaring a build node representing a file being built" if isinstance(lst, str): lst = Utils.split_path(lst) if len(lst) == 1: parent = self else: parent = self.find_dir(lst[:-1]) if not parent: return None self.__class__.bld.rescan(parent) name = lst[-1] node = parent.childs.get(name, None) if node: tp = node.id & 3 if tp != BUILD: raise Utils.WafError('find_or_declare found a source file where a build file was expected %r' % '/'.join(lst)) return node node = self.__class__(name, parent, BUILD) return node def find_dir(self, lst): "search a folder in the filesystem" if isinstance(lst, str): lst = Utils.split_path(lst) current = self for name in lst: self.__class__.bld.rescan(current) prev = current if not current.parent and name == current.name: continue elif not name: continue elif name == '.': continue elif name == '..': current = current.parent or current else: current = prev.childs.get(name, None) if current is 
None: dir_cont = self.__class__.bld.cache_dir_contents if prev.id in dir_cont and name in dir_cont[prev.id]: if not prev.name: if os.sep == '/': # cygwin //machine/share dirname = os.sep + name else: # windows c: dirname = name else: # regular path dirname = prev.abspath() + os.sep + name if not os.path.isdir(dirname): return None current = self.__class__(name, prev, DIR) elif (not prev.name and len(name) == 2 and name[1] == ':') or name.startswith('\\\\'): # drive letter or \\ path for windows current = self.__class__(name, prev, DIR) else: return None else: if current.id & 3 != DIR: return None return current def ensure_dir_node_from_path(self, lst): "used very rarely, force the construction of a branch of node instance for representing folders" if isinstance(lst, str): lst = Utils.split_path(lst) current = self for name in lst: if not name: continue elif name == '.': continue elif name == '..': current = current.parent or current else: prev = current current = prev.childs.get(name, None) if current is None: current = self.__class__(name, prev, DIR) return current def exclusive_build_node(self, path): """ create a hierarchy in the build dir (no source folders) for ill-behaving compilers the node is not hashed, so you must do it manually after declaring such a node, find_dir and find_resource should work as expected """ lst = Utils.split_path(path) name = lst[-1] if len(lst) > 1: parent = None try: parent = self.find_dir(lst[:-1]) except OSError: pass if not parent: parent = self.ensure_dir_node_from_path(lst[:-1]) self.__class__.bld.rescan(parent) else: try: self.__class__.bld.rescan(parent) except OSError: pass else: parent = self node = parent.childs.get(name, None) if not node: node = self.__class__(name, parent, BUILD) return node def path_to_parent(self, parent): "path relative to a direct ancestor, as string" lst = [] p = self h1 = parent.height() h2 = p.height() while h2 > h1: h2 -= 1 lst.append(p.name) p = p.parent if lst: lst.reverse() ret = 
os.path.join(*lst) else: ret = '' return ret def find_ancestor(self, node): "find a common ancestor for two nodes - for the shortest path in hierarchy" dist = self.height() - node.height() if dist < 0: return node.find_ancestor(self) # now the real code cand = self while dist > 0: cand = cand.parent dist -= 1 if cand == node: return cand cursor = node while cand.parent: cand = cand.parent cursor = cursor.parent if cand == cursor: return cand def relpath_gen(self, from_node): "string representing a relative path between self to another node" if self == from_node: return '.' if from_node.parent == self: return '..' # up_path is '../../../' and down_path is 'dir/subdir/subdir/file' ancestor = self.find_ancestor(from_node) lst = [] cand = self while not cand.id == ancestor.id: lst.append(cand.name) cand = cand.parent cand = from_node while not cand.id == ancestor.id: lst.append('..') cand = cand.parent lst.reverse() return os.sep.join(lst) def nice_path(self, env=None): "printed in the console, open files easily from the launch directory" tree = self.__class__.bld ln = tree.launch_node() if self.id & 3 == FILE: return self.relpath_gen(ln) else: return os.path.join(tree.bldnode.relpath_gen(ln), env.variant(), self.relpath_gen(tree.srcnode)) def is_child_of(self, node): "does this node belong to the subtree node" p = self diff = self.height() - node.height() while diff > 0: diff -= 1 p = p.parent return p.id == node.id def variant(self, env): "variant, or output directory for this node, a source has for variant 0" if not env: return 0 elif self.id & 3 == FILE: return 0 else: return env.variant() def height(self): "amount of parents" # README a cache can be added here if necessary d = self val = -1 while d: d = d.parent val += 1 return val # helpers for building things def abspath(self, env=None): """ absolute path @param env [Environment]: * obligatory for build nodes: build/variant/src/dir/bar.o * optional for dirs: get either src/dir or build/variant/src/dir * excluded 
for source nodes: src/dir/bar.c Instead of computing the absolute path each time again, store the already-computed absolute paths in one of (variants+1) dictionaries: bld.cache_node_abspath[0] holds absolute paths for source nodes. bld.cache_node_abspath[variant] holds the absolute path for the build nodes which reside in the variant given by env. """ ## absolute path - hot zone, so do not touch # less expensive variant = (env and (self.id & 3 != FILE) and env.variant()) or 0 ret = self.__class__.bld.cache_node_abspath[variant].get(self.id, None) if ret: return ret if not variant: # source directory if not self.parent: val = os.sep == '/' and os.sep or '' elif not self.parent.name: # root val = (os.sep == '/' and os.sep or '') + self.name else: val = self.parent.abspath() + os.sep + self.name else: # build directory val = os.sep.join((self.__class__.bld.bldnode.abspath(), variant, self.path_to_parent(self.__class__.bld.srcnode))) self.__class__.bld.cache_node_abspath[variant][self.id] = val return val def change_ext(self, ext): "node of the same path, but with a different extension - hot zone so do not touch" name = self.name k = name.rfind('.') if k >= 0: name = name[:k] + ext else: name = name + ext return self.parent.find_or_declare([name]) def src_dir(self, env): "src path without the file name" return self.parent.srcpath(env) def bld_dir(self, env): "build path without the file name" return self.parent.bldpath(env) def bld_base(self, env): "build path without the extension: src/dir/foo(.cpp)" s = os.path.splitext(self.name)[0] return os.path.join(self.bld_dir(env), s) def bldpath(self, env=None): "path seen from the build dir default/src/foo.cpp" if self.id & 3 == FILE: return self.relpath_gen(self.__class__.bld.bldnode) p = self.path_to_parent(self.__class__.bld.srcnode) if p is not '': return env.variant() + os.sep + p return env.variant() def srcpath(self, env=None): "path in the srcdir from the build dir ../src/foo.cpp" if self.id & 3 == BUILD: return 
self.bldpath(env) return self.relpath_gen(self.__class__.bld.bldnode) def read(self, env): "get the contents of a file, it is not used anywhere for the moment" return Utils.readf(self.abspath(env)) def dir(self, env): "scons-like" return self.parent.abspath(env) def file(self): "scons-like" return self.name def file_base(self): "scons-like" return os.path.splitext(self.name)[0] def suffix(self): "scons-like - hot zone so do not touch" k = max(0, self.name.rfind('.')) return self.name[k:] def find_iter_impl(self, src=True, bld=True, dir=True, accept_name=None, is_prune=None, maxdepth=25): """find nodes in the filesystem hierarchy, try to instanciate the nodes passively; same gotcha as ant_glob""" bld_ctx = self.__class__.bld bld_ctx.rescan(self) for name in bld_ctx.cache_dir_contents[self.id]: if accept_name(self, name): node = self.find_resource(name) if node: if src and node.id & 3 == FILE: yield node else: node = self.find_dir(name) if node and node.id != bld_ctx.bldnode.id: if dir: yield node if not is_prune(self, name): if maxdepth: for k in node.find_iter_impl(src, bld, dir, accept_name, is_prune, maxdepth=maxdepth - 1): yield k else: if not is_prune(self, name): node = self.find_resource(name) if not node: # not a file, it is a dir node = self.find_dir(name) if node and node.id != bld_ctx.bldnode.id: if maxdepth: for k in node.find_iter_impl(src, bld, dir, accept_name, is_prune, maxdepth=maxdepth - 1): yield k if bld: for node in self.childs.values(): if node.id == bld_ctx.bldnode.id: continue if node.id & 3 == BUILD: if accept_name(self, node.name): yield node raise StopIteration def find_iter(self, in_pat=['*'], ex_pat=exclude_pats, prune_pat=prune_pats, src=True, bld=True, dir=False, maxdepth=25, flat=False): """find nodes recursively, this returns everything but folders by default; same gotcha as ant_glob""" if not (src or bld or dir): raise StopIteration if self.id & 3 != DIR: raise StopIteration in_pat = Utils.to_list(in_pat) ex_pat = 
Utils.to_list(ex_pat) prune_pat = Utils.to_list(prune_pat) def accept_name(node, name): for pat in ex_pat: if fnmatch.fnmatchcase(name, pat): return False for pat in in_pat: if fnmatch.fnmatchcase(name, pat): return True return False def is_prune(node, name): for pat in prune_pat: if fnmatch.fnmatchcase(name, pat): return True return False ret = self.find_iter_impl(src, bld, dir, accept_name, is_prune, maxdepth=maxdepth) if flat: return " ".join([x.relpath_gen(self) for x in ret]) return ret def ant_glob(self, *k, **kw): """ known gotcha: will enumerate the files, but only if the folder exists in the source directory """ src=kw.get('src', 1) bld=kw.get('bld', 0) dir=kw.get('dir', 0) excl = kw.get('excl', exclude_regs) incl = k and k[0] or kw.get('incl', '**') def to_pat(s): lst = Utils.to_list(s) ret = [] for x in lst: x = x.replace('//', '/') if x.endswith('/'): x += '**' lst2 = x.split('/') accu = [] for k in lst2: if k == '**': accu.append(k) else: k = k.replace('.', '[.]').replace('*', '.*').replace('?', '.') k = '^%s$' % k #print "pattern", k accu.append(re.compile(k)) ret.append(accu) return ret def filtre(name, nn): ret = [] for lst in nn: if not lst: pass elif lst[0] == '**': ret.append(lst) if len(lst) > 1: if lst[1].match(name): ret.append(lst[2:]) else: ret.append([]) elif lst[0].match(name): ret.append(lst[1:]) return ret def accept(name, pats): nacc = filtre(name, pats[0]) nrej = filtre(name, pats[1]) if [] in nrej: nacc = [] return [nacc, nrej] def ant_iter(nodi, maxdepth=25, pats=[]): nodi.__class__.bld.rescan(nodi) tmp = list(nodi.__class__.bld.cache_dir_contents[nodi.id]) tmp.sort() for name in tmp: npats = accept(name, pats) if npats and npats[0]: accepted = [] in npats[0] #print accepted, nodi, name node = nodi.find_resource(name) if node and accepted: if src and node.id & 3 == FILE: yield node else: node = nodi.find_dir(name) if node and node.id != nodi.__class__.bld.bldnode.id: if accepted and dir: yield node if maxdepth: for k in 
ant_iter(node, maxdepth=maxdepth - 1, pats=npats): yield k if bld: for node in nodi.childs.values(): if node.id == nodi.__class__.bld.bldnode.id: continue if node.id & 3 == BUILD: npats = accept(node.name, pats) if npats and npats[0] and [] in npats[0]: yield node raise StopIteration ret = [x for x in ant_iter(self, pats=[to_pat(incl), to_pat(excl)])] if kw.get('flat', True): return " ".join([x.relpath_gen(self) for x in ret]) return ret def update_build_dir(self, env=None): if not env: for env in bld.all_envs: self.update_build_dir(env) return path = self.abspath(env) lst = Utils.listdir(path) try: self.__class__.bld.cache_dir_contents[self.id].update(lst) except KeyError: self.__class__.bld.cache_dir_contents[self.id] = set(lst) self.__class__.bld.cache_scanned_folders[self.id] = True for k in lst: npath = path + os.sep + k st = os.stat(npath) if stat.S_ISREG(st[stat.ST_MODE]): ick = self.find_or_declare(k) if not (ick.id in self.__class__.bld.node_sigs[env.variant()]): self.__class__.bld.node_sigs[env.variant()][ick.id] = Constants.SIG_NIL elif stat.S_ISDIR(st[stat.ST_MODE]): child = self.find_dir(k) if not child: child = self.ensure_dir_node_from_path(k) child.update_build_dir(env) class Nodu(Node): pass ntdb-1.0/buildtools/wafadmin/Options.py000066400000000000000000000172131224151530700202600ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Scott Newton, 2005 (scottn) # Thomas Nagy, 2006 (ita) "Custom command-line options" import os, sys, imp, types, tempfile, optparse import Logs, Utils from Constants import * cmds = 'distclean configure build install clean uninstall check dist distcheck'.split() # TODO remove in waf 1.6 the following two commands = {} is_install = False options = {} arg_line = [] launch_dir = '' tooldir = '' lockfile = os.environ.get('WAFLOCK', '.lock-wscript') try: cache_global = os.path.abspath(os.environ['WAFCACHE']) except KeyError: cache_global = '' platform = Utils.unversioned_sys_platform() conf_file = 
'conf-runs-%s-%d.pickle' % (platform, ABI) remote_repo = ['http://waf.googlecode.com/svn/'] """remote directory for the plugins""" # Such a command-line should work: JOBS=4 PREFIX=/opt/ DESTDIR=/tmp/ahoj/ waf configure default_prefix = os.environ.get('PREFIX') if not default_prefix: if platform == 'win32': d = tempfile.gettempdir() default_prefix = d[0].upper() + d[1:] # win32 preserves the case, but gettempdir does not else: default_prefix = '/usr/local/' default_jobs = os.environ.get('JOBS', -1) if default_jobs < 1: try: if 'SC_NPROCESSORS_ONLN' in os.sysconf_names: default_jobs = os.sysconf('SC_NPROCESSORS_ONLN') else: default_jobs = int(Utils.cmd_output(['sysctl', '-n', 'hw.ncpu'])) except: if os.name == 'java': # platform.system() == 'Java' from java.lang import Runtime default_jobs = Runtime.getRuntime().availableProcessors() else: # environment var defined on win32 default_jobs = int(os.environ.get('NUMBER_OF_PROCESSORS', 1)) default_destdir = os.environ.get('DESTDIR', '') def get_usage(self): cmds_str = [] module = Utils.g_module if module: # create the help messages for commands tbl = module.__dict__ keys = list(tbl.keys()) keys.sort() if 'build' in tbl: if not module.build.__doc__: module.build.__doc__ = 'builds the project' if 'configure' in tbl: if not module.configure.__doc__: module.configure.__doc__ = 'configures the project' ban = ['set_options', 'init', 'shutdown'] optlst = [x for x in keys if not x in ban and type(tbl[x]) is type(parse_args_impl) and tbl[x].__doc__ and not x.startswith('_')] just = max([len(x) for x in optlst]) for x in optlst: cmds_str.append(' %s: %s' % (x.ljust(just), tbl[x].__doc__)) ret = '\n'.join(cmds_str) else: ret = ' '.join(cmds) return '''waf [command] [options] Main commands (example: ./waf build -j4) %s ''' % ret setattr(optparse.OptionParser, 'get_usage', get_usage) def create_parser(module=None): Logs.debug('options: create_parser is called') parser = optparse.OptionParser(conflict_handler="resolve", version = 'waf 
%s (%s)' % (WAFVERSION, WAFREVISION)) parser.formatter.width = Utils.get_term_cols() p = parser.add_option p('-j', '--jobs', type = 'int', default = default_jobs, help = 'amount of parallel jobs (%r)' % default_jobs, dest = 'jobs') p('-k', '--keep', action = 'store_true', default = False, help = 'keep running happily on independent task groups', dest = 'keep') p('-v', '--verbose', action = 'count', default = 0, help = 'verbosity level -v -vv or -vvv [default: 0]', dest = 'verbose') p('--nocache', action = 'store_true', default = False, help = 'ignore the WAFCACHE (if set)', dest = 'nocache') p('--zones', action = 'store', default = '', help = 'debugging zones (task_gen, deps, tasks, etc)', dest = 'zones') p('-p', '--progress', action = 'count', default = 0, help = '-p: progress bar; -pp: ide output', dest = 'progress_bar') p('--targets', action = 'store', default = '', help = 'build given task generators, e.g. "target1,target2"', dest = 'compile_targets') gr = optparse.OptionGroup(parser, 'configuration options') parser.add_option_group(gr) gr.add_option('-b', '--blddir', action = 'store', default = '', help = 'out dir for the project (configuration)', dest = 'blddir') gr.add_option('-s', '--srcdir', action = 'store', default = '', help = 'top dir for the project (configuration)', dest = 'srcdir') gr.add_option('--prefix', help = 'installation prefix (configuration) [default: %r]' % default_prefix, default = default_prefix, dest = 'prefix') gr.add_option('--download', action = 'store_true', default = False, help = 'try to download the tools if missing', dest = 'download') gr = optparse.OptionGroup(parser, 'installation options') parser.add_option_group(gr) gr.add_option('--destdir', help = 'installation root [default: %r]' % default_destdir, default = default_destdir, dest = 'destdir') gr.add_option('-f', '--force', action = 'store_true', default = False, help = 'force file installation', dest = 'force') return parser def parse_args_impl(parser, _args=None): global 
options, commands, arg_line (options, args) = parser.parse_args(args=_args) arg_line = args #arg_line = args[:] # copy # By default, 'waf' is equivalent to 'waf build' commands = {} for var in cmds: commands[var] = 0 if not args: commands['build'] = 1 args.append('build') # Parse the command arguments for arg in args: commands[arg] = True # the check thing depends on the build if 'check' in args: idx = args.index('check') try: bidx = args.index('build') if bidx > idx: raise ValueError('build before check') except ValueError, e: args.insert(idx, 'build') if args[0] != 'init': args.insert(0, 'init') # TODO -k => -j0 if options.keep: options.jobs = 1 if options.jobs < 1: options.jobs = 1 if 'install' in sys.argv or 'uninstall' in sys.argv: # absolute path only if set options.destdir = options.destdir and os.path.abspath(os.path.expanduser(options.destdir)) Logs.verbose = options.verbose Logs.init_log() if options.zones: Logs.zones = options.zones.split(',') if not Logs.verbose: Logs.verbose = 1 elif Logs.verbose > 0: Logs.zones = ['runner'] if Logs.verbose > 2: Logs.zones = ['*'] # TODO waf 1.6 # 1. rename the class to OptionsContext # 2. instead of a class attribute, use a module (static 'parser') # 3. 
parse_args_impl was made in times when we did not know about binding new methods to classes class Handler(Utils.Context): """loads wscript modules in folders for adding options This class should be named 'OptionsContext' A method named 'recurse' is bound when used by the module Scripting""" parser = None # make it possible to access the reference, like Build.bld def __init__(self, module=None): self.parser = create_parser(module) self.cwd = os.getcwd() Handler.parser = self def add_option(self, *k, **kw): self.parser.add_option(*k, **kw) def add_option_group(self, *k, **kw): return self.parser.add_option_group(*k, **kw) def get_option_group(self, opt_str): return self.parser.get_option_group(opt_str) def sub_options(self, *k, **kw): if not k: raise Utils.WscriptError('folder expected') self.recurse(k[0], name='set_options') def tool_options(self, *k, **kw): Utils.python_24_guard() if not k[0]: raise Utils.WscriptError('invalid tool_options call %r %r' % (k, kw)) tools = Utils.to_list(k[0]) # TODO waf 1.6 remove the global variable tooldir path = Utils.to_list(kw.get('tdir', kw.get('tooldir', tooldir))) for tool in tools: tool = tool.replace('++', 'xx') if tool == 'java': tool = 'javaw' if tool.lower() == 'unittest': tool = 'unittestw' module = Utils.load_tool(tool, path) try: fun = module.set_options except AttributeError: pass else: fun(kw.get('option_group', self)) def parse_args(self, args=None): parse_args_impl(self.parser, args) ntdb-1.0/buildtools/wafadmin/Runner.py000066400000000000000000000126641224151530700201030ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2008 (ita) "Execute the tasks" import os, sys, random, time, threading, traceback try: from Queue import Queue except ImportError: from queue import Queue import Build, Utils, Logs, Options from Logs import debug, error from Constants import * GAP = 15 run_old = threading.Thread.run def run(*args, **kwargs): try: run_old(*args, **kwargs) except 
(KeyboardInterrupt, SystemExit): raise except: sys.excepthook(*sys.exc_info()) threading.Thread.run = run def process_task(tsk): m = tsk.master if m.stop: m.out.put(tsk) return try: tsk.generator.bld.printout(tsk.display()) if tsk.__class__.stat: ret = tsk.__class__.stat(tsk) # actual call to task's run() function else: ret = tsk.call_run() except Exception, e: tsk.err_msg = Utils.ex_stack() tsk.hasrun = EXCEPTION # TODO cleanup m.error_handler(tsk) m.out.put(tsk) return if ret: tsk.err_code = ret tsk.hasrun = CRASHED else: try: tsk.post_run() except Utils.WafError: pass except Exception: tsk.err_msg = Utils.ex_stack() tsk.hasrun = EXCEPTION else: tsk.hasrun = SUCCESS if tsk.hasrun != SUCCESS: m.error_handler(tsk) m.out.put(tsk) class TaskConsumer(threading.Thread): ready = Queue(0) consumers = [] def __init__(self): threading.Thread.__init__(self) self.setDaemon(1) self.start() def run(self): try: self.loop() except: pass def loop(self): while 1: tsk = TaskConsumer.ready.get() process_task(tsk) class Parallel(object): """ keep the consumer threads busy, and avoid consuming cpu cycles when no more tasks can be added (end of the build, etc) """ def __init__(self, bld, j=2): # number of consumers self.numjobs = j self.manager = bld.task_manager self.manager.current_group = 0 self.total = self.manager.total() # tasks waiting to be processed - IMPORTANT self.outstanding = [] self.maxjobs = MAXJOBS # tasks that are awaiting for another task to complete self.frozen = [] # tasks returned by the consumers self.out = Queue(0) self.count = 0 # tasks not in the producer area self.processed = 1 # progress indicator self.stop = False # error condition to stop the build self.error = False # error flag def get_next(self): "override this method to schedule the tasks in a particular order" if not self.outstanding: return None return self.outstanding.pop(0) def postpone(self, tsk): "override this method to schedule the tasks in a particular order" # TODO consider using a deque 
instead if random.randint(0, 1): self.frozen.insert(0, tsk) else: self.frozen.append(tsk) def refill_task_list(self): "called to set the next group of tasks" while self.count > self.numjobs + GAP or self.count >= self.maxjobs: self.get_out() while not self.outstanding: if self.count: self.get_out() if self.frozen: self.outstanding += self.frozen self.frozen = [] elif not self.count: (jobs, tmp) = self.manager.get_next_set() if jobs != None: self.maxjobs = jobs if tmp: self.outstanding += tmp break def get_out(self): "the tasks that are put to execute are all collected using get_out" ret = self.out.get() self.manager.add_finished(ret) if not self.stop and getattr(ret, 'more_tasks', None): self.outstanding += ret.more_tasks self.total += len(ret.more_tasks) self.count -= 1 def error_handler(self, tsk): "by default, errors make the build stop (not thread safe so be careful)" if not Options.options.keep: self.stop = True self.error = True def start(self): "execute the tasks" if TaskConsumer.consumers: # the worker pool is usually loaded lazily (see below) # in case it is re-used with a different value of numjobs: while len(TaskConsumer.consumers) < self.numjobs: TaskConsumer.consumers.append(TaskConsumer()) while not self.stop: self.refill_task_list() # consider the next task tsk = self.get_next() if not tsk: if self.count: # tasks may add new ones after they are run continue else: # no tasks to run, no tasks running, time to exit break if tsk.hasrun: # if the task is marked as "run", just skip it self.processed += 1 self.manager.add_finished(tsk) continue try: st = tsk.runnable_status() except Exception, e: self.processed += 1 if self.stop and not Options.options.keep: tsk.hasrun = SKIPPED self.manager.add_finished(tsk) continue self.error_handler(tsk) self.manager.add_finished(tsk) tsk.hasrun = EXCEPTION tsk.err_msg = Utils.ex_stack() continue if st == ASK_LATER: self.postpone(tsk) elif st == SKIP_ME: self.processed += 1 tsk.hasrun = SKIPPED 
self.manager.add_finished(tsk) else: # run me: put the task in ready queue tsk.position = (self.processed, self.total) self.count += 1 tsk.master = self self.processed += 1 if self.numjobs == 1: process_task(tsk) else: TaskConsumer.ready.put(tsk) # create the consumer threads only if there is something to consume if not TaskConsumer.consumers: TaskConsumer.consumers = [TaskConsumer() for i in xrange(self.numjobs)] # self.count represents the tasks that have been made available to the consumer threads # collect all the tasks after an error else the message may be incomplete while self.error and self.count: self.get_out() #print loop assert (self.count == 0 or self.stop) ntdb-1.0/buildtools/wafadmin/Scripting.py000066400000000000000000000357031224151530700205730ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005 (ita) "Module called for configuring, compiling and installing targets" import os, sys, shutil, traceback, datetime, inspect, errno import Utils, Configure, Build, Logs, Options, Environment, Task from Logs import error, warn, info from Constants import * g_gz = 'bz2' commands = [] def prepare_impl(t, cwd, ver, wafdir): Options.tooldir = [t] Options.launch_dir = cwd # some command-line options can be processed immediately if '--version' in sys.argv: opt_obj = Options.Handler() opt_obj.curdir = cwd opt_obj.parse_args() sys.exit(0) # now find the wscript file msg1 = 'Waf: Please run waf from a directory containing a file named "%s" or run distclean' % WSCRIPT_FILE # in theory projects can be configured in an autotool-like manner: # mkdir build && cd build && ../waf configure && ../waf build_dir_override = None candidate = None lst = os.listdir(cwd) search_for_candidate = True if WSCRIPT_FILE in lst: candidate = cwd elif 'configure' in sys.argv and not WSCRIPT_BUILD_FILE in lst: # autotool-like configuration calldir = os.path.abspath(os.path.dirname(sys.argv[0])) if WSCRIPT_FILE in os.listdir(calldir): candidate = calldir 
search_for_candidate = False else: error('arg[0] directory does not contain a wscript file') sys.exit(1) build_dir_override = cwd # climb up to find a script if it is not found while search_for_candidate: if len(cwd) <= 3: break # stop at / or c: dirlst = os.listdir(cwd) if WSCRIPT_FILE in dirlst: candidate = cwd if 'configure' in sys.argv and candidate: break if Options.lockfile in dirlst: env = Environment.Environment() try: env.load(os.path.join(cwd, Options.lockfile)) except: error('could not load %r' % Options.lockfile) try: os.stat(env['cwd']) except: candidate = cwd else: candidate = env['cwd'] break cwd = os.path.dirname(cwd) # climb up if not candidate: # check if the user only wanted to display the help if '-h' in sys.argv or '--help' in sys.argv: warn('No wscript file found: the help message may be incomplete') opt_obj = Options.Handler() opt_obj.curdir = cwd opt_obj.parse_args() else: error(msg1) sys.exit(0) # We have found wscript, but there is no guarantee that it is valid try: os.chdir(candidate) except OSError: raise Utils.WafError("the folder %r is unreadable" % candidate) # define the main module containing the functions init, shutdown, .. Utils.set_main_module(os.path.join(candidate, WSCRIPT_FILE)) if build_dir_override: d = getattr(Utils.g_module, BLDDIR, None) if d: # test if user has set the blddir in wscript. 
msg = ' Overriding build directory %s with %s' % (d, build_dir_override) warn(msg) Utils.g_module.blddir = build_dir_override # bind a few methods and classes by default def set_def(obj, name=''): n = name or obj.__name__ if not n in Utils.g_module.__dict__: setattr(Utils.g_module, n, obj) for k in [dist, distclean, distcheck, clean, install, uninstall]: set_def(k) set_def(Configure.ConfigurationContext, 'configure_context') for k in ['build', 'clean', 'install', 'uninstall']: set_def(Build.BuildContext, k + '_context') # now parse the options from the user wscript file opt_obj = Options.Handler(Utils.g_module) opt_obj.curdir = candidate try: f = Utils.g_module.set_options except AttributeError: pass else: opt_obj.sub_options(['']) opt_obj.parse_args() if not 'init' in Utils.g_module.__dict__: Utils.g_module.init = Utils.nada if not 'shutdown' in Utils.g_module.__dict__: Utils.g_module.shutdown = Utils.nada main() def prepare(t, cwd, ver, wafdir): if WAFVERSION != ver: msg = 'Version mismatch: waf %s <> wafadmin %s (wafdir %s)' % (ver, WAFVERSION, wafdir) print('\033[91mError: %s\033[0m' % msg) sys.exit(1) #""" try: prepare_impl(t, cwd, ver, wafdir) except Utils.WafError, e: error(str(e)) sys.exit(1) except KeyboardInterrupt: Utils.pprint('RED', 'Interrupted') sys.exit(68) """ import cProfile, pstats cProfile.runctx("import Scripting; Scripting.prepare_impl(t, cwd, ver, wafdir)", {}, {'t': t, 'cwd':cwd, 'ver':ver, 'wafdir':wafdir}, 'profi.txt') p = pstats.Stats('profi.txt') p.sort_stats('time').print_stats(45) #""" def main(): global commands commands = Options.arg_line[:] while commands: x = commands.pop(0) ini = datetime.datetime.now() if x == 'configure': fun = configure elif x == 'build': fun = build else: fun = getattr(Utils.g_module, x, None) if not fun: raise Utils.WscriptError('No such command %r' % x) ctx = getattr(Utils.g_module, x + '_context', Utils.Context)() if x in ['init', 'shutdown', 'dist', 'distclean', 'distcheck']: # compatibility TODO remove in 
waf 1.6 try: fun(ctx) except TypeError: fun() else: fun(ctx) ela = '' if not Options.options.progress_bar: ela = ' (%s)' % Utils.get_elapsed_time(ini) if x != 'init' and x != 'shutdown': info('%r finished successfully%s' % (x, ela)) if not commands and x != 'shutdown': commands.append('shutdown') def configure(conf): src = getattr(Options.options, SRCDIR, None) if not src: src = getattr(Utils.g_module, SRCDIR, None) if not src: src = getattr(Utils.g_module, 'top', None) if not src: src = '.' incomplete_src = 1 src = os.path.abspath(src) bld = getattr(Options.options, BLDDIR, None) if not bld: bld = getattr(Utils.g_module, BLDDIR, None) if not bld: bld = getattr(Utils.g_module, 'out', None) if not bld: bld = 'build' incomplete_bld = 1 if bld == '.': raise Utils.WafError('Setting blddir="." may cause distclean problems') bld = os.path.abspath(bld) try: os.makedirs(bld) except OSError: pass # It is not possible to compile specific targets in the configuration # this may cause configuration errors if autoconfig is set targets = Options.options.compile_targets Options.options.compile_targets = None Options.is_install = False conf.srcdir = src conf.blddir = bld conf.post_init() if 'incomplete_src' in vars(): conf.check_message_1('Setting srcdir to') conf.check_message_2(src) if 'incomplete_bld' in vars(): conf.check_message_1('Setting blddir to') conf.check_message_2(bld) # calling to main wscript's configure() conf.sub_config(['']) conf.store() # this will write a configure lock so that subsequent builds will # consider the current path as the root directory (see prepare_impl). 
# to remove: use 'waf distclean' env = Environment.Environment() env[BLDDIR] = bld env[SRCDIR] = src env['argv'] = sys.argv env['commands'] = Options.commands env['options'] = Options.options.__dict__ # conf.hash & conf.files hold wscript files paths and hash # (used only by Configure.autoconfig) env['hash'] = conf.hash env['files'] = conf.files env['environ'] = dict(conf.environ) env['cwd'] = os.path.split(Utils.g_module.root_path)[0] if Utils.g_module.root_path != src: # in case the source dir is somewhere else env.store(os.path.join(src, Options.lockfile)) env.store(Options.lockfile) Options.options.compile_targets = targets def clean(bld): '''removes the build files''' try: proj = Environment.Environment(Options.lockfile) except IOError: raise Utils.WafError('Nothing to clean (project not configured)') bld.load_dirs(proj[SRCDIR], proj[BLDDIR]) bld.load_envs() bld.is_install = 0 # False # read the scripts - and set the path to the wscript path (useful for srcdir='/foo/bar') bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]]) try: bld.clean() finally: bld.save() def check_configured(bld): if not Configure.autoconfig: return bld conf_cls = getattr(Utils.g_module, 'configure_context', Utils.Context) bld_cls = getattr(Utils.g_module, 'build_context', Utils.Context) def reconf(proj): back = (Options.commands, Options.options.__dict__, Logs.zones, Logs.verbose) Options.commands = proj['commands'] Options.options.__dict__ = proj['options'] conf = conf_cls() conf.environ = proj['environ'] configure(conf) (Options.commands, Options.options.__dict__, Logs.zones, Logs.verbose) = back try: proj = Environment.Environment(Options.lockfile) except IOError: conf = conf_cls() configure(conf) else: try: bld = bld_cls() bld.load_dirs(proj[SRCDIR], proj[BLDDIR]) bld.load_envs() except Utils.WafError: reconf(proj) return bld_cls() try: proj = Environment.Environment(Options.lockfile) except IOError: raise Utils.WafError('Auto-config: project does not configure (bug)') h = 
0 try: for file in proj['files']: if file.endswith('configure'): h = hash((h, Utils.readf(file))) else: mod = Utils.load_module(file) h = hash((h, mod.waf_hash_val)) except (OSError, IOError): warn('Reconfiguring the project: a file is unavailable') reconf(proj) else: if (h != proj['hash']): warn('Reconfiguring the project: the configuration has changed') reconf(proj) return bld_cls() def install(bld): '''installs the build files''' bld = check_configured(bld) Options.commands['install'] = True Options.commands['uninstall'] = False Options.is_install = True bld.is_install = INSTALL build_impl(bld) bld.install() def uninstall(bld): '''removes the installed files''' Options.commands['install'] = False Options.commands['uninstall'] = True Options.is_install = True bld.is_install = UNINSTALL try: def runnable_status(self): return SKIP_ME setattr(Task.Task, 'runnable_status_back', Task.Task.runnable_status) setattr(Task.Task, 'runnable_status', runnable_status) build_impl(bld) bld.install() finally: setattr(Task.Task, 'runnable_status', Task.Task.runnable_status_back) def build(bld): bld = check_configured(bld) Options.commands['install'] = False Options.commands['uninstall'] = False Options.is_install = False bld.is_install = 0 # False return build_impl(bld) def build_impl(bld): # compile the project and/or install the files try: proj = Environment.Environment(Options.lockfile) except IOError: raise Utils.WafError("Project not configured (run 'waf configure' first)") bld.load_dirs(proj[SRCDIR], proj[BLDDIR]) bld.load_envs() info("Waf: Entering directory `%s'" % bld.bldnode.abspath()) bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]]) # execute something immediately before the build starts bld.pre_build() try: bld.compile() finally: if Options.options.progress_bar: print('') info("Waf: Leaving directory `%s'" % bld.bldnode.abspath()) # execute something immediately after a successful build bld.post_build() bld.install() excludes = '.bzr .bzrignore .git 
.gitignore .svn CVS .cvsignore .arch-ids {arch} SCCS BitKeeper .hg _MTN _darcs Makefile Makefile.in config.log .gitattributes .hgignore .hgtags'.split() dist_exts = '~ .rej .orig .pyc .pyo .bak .tar.bz2 tar.gz .zip .swp'.split() def dont_dist(name, src, build_dir): global excludes, dist_exts if (name.startswith(',,') or name.startswith('++') or name.startswith('.waf') or (src == '.' and name == Options.lockfile) or name in excludes or name == build_dir ): return True for ext in dist_exts: if name.endswith(ext): return True return False # like shutil.copytree # exclude files and to raise exceptions immediately def copytree(src, dst, build_dir): names = os.listdir(src) os.makedirs(dst) for name in names: srcname = os.path.join(src, name) dstname = os.path.join(dst, name) if dont_dist(name, src, build_dir): continue if os.path.isdir(srcname): copytree(srcname, dstname, build_dir) else: shutil.copy2(srcname, dstname) # TODO in waf 1.6, change this method if "srcdir == blddir" is allowed def distclean(ctx=None): '''removes the build directory''' global commands lst = os.listdir('.') for f in lst: if f == Options.lockfile: try: proj = Environment.Environment(f) except: Logs.warn('could not read %r' % f) continue try: shutil.rmtree(proj[BLDDIR]) except IOError: pass except OSError, e: if e.errno != errno.ENOENT: Logs.warn('project %r cannot be removed' % proj[BLDDIR]) try: os.remove(f) except OSError, e: if e.errno != errno.ENOENT: Logs.warn('file %r cannot be removed' % f) # remove the local waf cache if not commands and f.startswith('.waf'): shutil.rmtree(f, ignore_errors=True) # FIXME waf 1.6 a unique ctx parameter, and remove the optional appname and version def dist(appname='', version=''): '''makes a tarball for redistributing the sources''' # return return (distdirname, tarballname) import tarfile if not appname: appname = Utils.g_module.APPNAME if not version: version = Utils.g_module.VERSION tmp_folder = appname + '-' + version if g_gz in ['gz', 'bz2']: arch_name 
= tmp_folder + '.tar.' + g_gz else: arch_name = tmp_folder + '.' + 'zip' # remove the previous dir try: shutil.rmtree(tmp_folder) except (OSError, IOError): pass # remove the previous archive try: os.remove(arch_name) except (OSError, IOError): pass # copy the files into the temporary folder blddir = getattr(Utils.g_module, BLDDIR, None) if not blddir: blddir = getattr(Utils.g_module, 'out', None) copytree('.', tmp_folder, blddir) # undocumented hook for additional cleanup dist_hook = getattr(Utils.g_module, 'dist_hook', None) if dist_hook: back = os.getcwd() os.chdir(tmp_folder) try: dist_hook() finally: # go back to the root directory os.chdir(back) if g_gz in ['gz', 'bz2']: tar = tarfile.open(arch_name, 'w:' + g_gz) tar.add(tmp_folder) tar.close() else: Utils.zip_folder(tmp_folder, arch_name, tmp_folder) try: from hashlib import sha1 as sha except ImportError: from sha import sha try: digest = " (sha=%r)" % sha(Utils.readf(arch_name)).hexdigest() except: digest = '' info('New archive created: %s%s' % (arch_name, digest)) if os.path.exists(tmp_folder): shutil.rmtree(tmp_folder) return arch_name # FIXME waf 1.6 a unique ctx parameter, and remove the optional appname and version def distcheck(appname='', version='', subdir=''): '''checks if the sources compile (tarball from 'dist')''' import tempfile, tarfile if not appname: appname = Utils.g_module.APPNAME if not version: version = Utils.g_module.VERSION waf = os.path.abspath(sys.argv[0]) tarball = dist(appname, version) path = appname + '-' + version # remove any previous instance if os.path.exists(path): shutil.rmtree(path) t = tarfile.open(tarball) for x in t: t.extract(x) t.close() # build_path is the directory for the waf invocation if subdir: build_path = os.path.join(path, subdir) else: build_path = path instdir = tempfile.mkdtemp('.inst', '%s-%s' % (appname, version)) ret = Utils.pproc.Popen([waf, 'configure', 'build', 'install', 'uninstall', '--destdir=' + instdir], cwd=build_path).wait() if ret: raise 
Utils.WafError('distcheck failed with code %i' % ret) if os.path.exists(instdir): raise Utils.WafError('distcheck succeeded, but files were left in %s' % instdir) shutil.rmtree(path) # FIXME remove in Waf 1.6 (kept for compatibility) def add_subdir(dir, bld): bld.recurse(dir, 'build') ntdb-1.0/buildtools/wafadmin/Task.py000066400000000000000000001030101224151530700175160ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2008 (ita) """ Running tasks in parallel is a simple problem, but in practice it is more complicated: * dependencies discovered during the build (dynamic task creation) * dependencies discovered after files are compiled * the amount of tasks and dependencies (graph size) can be huge This is why the dependency management is split on three different levels: 1. groups of tasks that run all after another group of tasks 2. groups of tasks that can be run in parallel 3. tasks that can run in parallel, but with possible unknown ad-hoc dependencies The point #1 represents a strict sequential order between groups of tasks, for example a compiler is produced and used to compile the rest, whereas #2 and #3 represent partial order constraints where #2 applies to the kind of task and #3 applies to the task instances. 
#1 is held by the task manager: ordered list of TaskGroups (see bld.add_group) #2 is held by the task groups and the task types: precedence after/before (topological sort), and the constraints extracted from file extensions #3 is held by the tasks individually (attribute run_after), and the scheduler (Runner.py) use Task::runnable_status to reorder the tasks -- To try, use something like this in your code: import Constants, Task Task.algotype = Constants.MAXPARALLEL -- There are two concepts with the tasks (individual units of change): * dependency (if 1 is recompiled, recompile 2) * order (run 2 after 1) example 1: if t1 depends on t2 and t2 depends on t3 it is not necessary to make t1 depend on t3 (dependency is transitive) example 2: if t1 depends on a node produced by t2, it is not immediately obvious that t1 must run after t2 (order is not obvious) The role of the Task Manager is to give the tasks in order (groups of task that may be run in parallel one after the other) """ import os, shutil, sys, re, random, datetime, tempfile, shlex from Utils import md5 import Build, Runner, Utils, Node, Logs, Options from Logs import debug, warn, error from Constants import * algotype = NORMAL #algotype = JOBCONTROL #algotype = MAXPARALLEL COMPILE_TEMPLATE_SHELL = ''' def f(task): env = task.env wd = getattr(task, 'cwd', None) p = env.get_flat cmd = \'\'\' %s \'\'\' % s return task.exec_command(cmd, cwd=wd) ''' COMPILE_TEMPLATE_NOSHELL = ''' def f(task): env = task.env wd = getattr(task, 'cwd', None) def to_list(xx): if isinstance(xx, str): return [xx] return xx lst = [] %s lst = [x for x in lst if x] return task.exec_command(lst, cwd=wd) ''' """ Enable different kind of dependency algorithms: 1 make groups: first compile all cpps and then compile all links (NORMAL) 2 parallelize all (each link task run after its dependencies) (MAXPARALLEL) 3 like 1 but provide additional constraints for the parallelization (MAXJOBS) In theory 1. 
will be faster than 2 for waf, but might be slower for builds The scheme 2 will not allow for running tasks one by one so it can cause disk thrashing on huge builds """ file_deps = Utils.nada """ Additional dependency pre-check may be added by replacing the function file_deps. e.g. extract_outputs, extract_deps below. """ class TaskManager(object): """The manager is attached to the build object, it holds a list of TaskGroup""" def __init__(self): self.groups = [] self.tasks_done = [] self.current_group = 0 self.groups_names = {} def group_name(self, g): """name for the group g (utility)""" if not isinstance(g, TaskGroup): g = self.groups[g] for x in self.groups_names: if id(self.groups_names[x]) == id(g): return x return '' def group_idx(self, tg): """group the task generator tg is in""" se = id(tg) for i in range(len(self.groups)): g = self.groups[i] for t in g.tasks_gen: if id(t) == se: return i return None def get_next_set(self): """return the next set of tasks to execute the first parameter is the maximum amount of parallelization that may occur""" ret = None while not ret and self.current_group < len(self.groups): ret = self.groups[self.current_group].get_next_set() if ret: return ret else: self.groups[self.current_group].process_install() self.current_group += 1 return (None, None) def add_group(self, name=None, set=True): #if self.groups and not self.groups[0].tasks: # error('add_group: an empty group is already present') g = TaskGroup() if name and name in self.groups_names: error('add_group: name %s already present' % name) self.groups_names[name] = g self.groups.append(g) if set: self.current_group = len(self.groups) - 1 def set_group(self, idx): if isinstance(idx, str): g = self.groups_names[idx] for x in xrange(len(self.groups)): if id(g) == id(self.groups[x]): self.current_group = x else: self.current_group = idx def add_task_gen(self, tgen): if not self.groups: self.add_group() self.groups[self.current_group].tasks_gen.append(tgen) def add_task(self, 
task): if not self.groups: self.add_group() self.groups[self.current_group].tasks.append(task) def total(self): total = 0 if not self.groups: return 0 for group in self.groups: total += len(group.tasks) return total def add_finished(self, tsk): self.tasks_done.append(tsk) bld = tsk.generator.bld if bld.is_install: f = None if 'install' in tsk.__dict__: f = tsk.__dict__['install'] # install=0 to prevent installation if f: f(tsk) else: tsk.install() class TaskGroup(object): "the compilation of one group does not begin until the previous group has finished (in the manager)" def __init__(self): self.tasks = [] # this list will be consumed self.tasks_gen = [] self.cstr_groups = Utils.DefaultDict(list) # tasks having equivalent constraints self.cstr_order = Utils.DefaultDict(set) # partial order between the cstr groups self.temp_tasks = [] # tasks put on hold self.ready = 0 self.post_funs = [] def reset(self): "clears the state of the object (put back the tasks into self.tasks)" for x in self.cstr_groups: self.tasks += self.cstr_groups[x] self.tasks = self.temp_tasks + self.tasks self.temp_tasks = [] self.cstr_groups = Utils.DefaultDict(list) self.cstr_order = Utils.DefaultDict(set) self.ready = 0 def process_install(self): for (f, k, kw) in self.post_funs: f(*k, **kw) def prepare(self): "prepare the scheduling" self.ready = 1 file_deps(self.tasks) self.make_cstr_groups() self.extract_constraints() def get_next_set(self): "next list of tasks to execute using max job settings, returns (maxjobs, task_list)" global algotype if algotype == NORMAL: tasks = self.tasks_in_parallel() maxj = MAXJOBS elif algotype == JOBCONTROL: (maxj, tasks) = self.tasks_by_max_jobs() elif algotype == MAXPARALLEL: tasks = self.tasks_with_inner_constraints() maxj = MAXJOBS else: raise Utils.WafError("unknown algorithm type %s" % (algotype)) if not tasks: return () return (maxj, tasks) def make_cstr_groups(self): "unite the tasks that have similar constraints" self.cstr_groups = 
Utils.DefaultDict(list) for x in self.tasks: h = x.hash_constraints() self.cstr_groups[h].append(x) def set_order(self, a, b): self.cstr_order[a].add(b) def compare_exts(self, t1, t2): "extension production" x = "ext_in" y = "ext_out" in_ = t1.attr(x, ()) out_ = t2.attr(y, ()) for k in in_: if k in out_: return -1 in_ = t2.attr(x, ()) out_ = t1.attr(y, ()) for k in in_: if k in out_: return 1 return 0 def compare_partial(self, t1, t2): "partial relations after/before" m = "after" n = "before" name = t2.__class__.__name__ if name in Utils.to_list(t1.attr(m, ())): return -1 elif name in Utils.to_list(t1.attr(n, ())): return 1 name = t1.__class__.__name__ if name in Utils.to_list(t2.attr(m, ())): return 1 elif name in Utils.to_list(t2.attr(n, ())): return -1 return 0 def extract_constraints(self): "extract the parallelization constraints from the tasks with different constraints" keys = self.cstr_groups.keys() max = len(keys) # hopefully the length of this list is short for i in xrange(max): t1 = self.cstr_groups[keys[i]][0] for j in xrange(i + 1, max): t2 = self.cstr_groups[keys[j]][0] # add the constraints based on the comparisons val = (self.compare_exts(t1, t2) or self.compare_partial(t1, t2) ) if val > 0: self.set_order(keys[i], keys[j]) elif val < 0: self.set_order(keys[j], keys[i]) def tasks_in_parallel(self): "(NORMAL) next list of tasks that may be executed in parallel" if not self.ready: self.prepare() keys = self.cstr_groups.keys() unconnected = [] remainder = [] for u in keys: for k in self.cstr_order.values(): if u in k: remainder.append(u) break else: unconnected.append(u) toreturn = [] for y in unconnected: toreturn.extend(self.cstr_groups[y]) # remove stuff only after for y in unconnected: try: self.cstr_order.__delitem__(y) except KeyError: pass self.cstr_groups.__delitem__(y) if not toreturn and remainder: raise Utils.WafError("circular order constraint detected %r" % remainder) return toreturn def tasks_by_max_jobs(self): "(JOBCONTROL) returns the 
tasks that can run in parallel with the max amount of jobs" if not self.ready: self.prepare() if not self.temp_tasks: self.temp_tasks = self.tasks_in_parallel() if not self.temp_tasks: return (None, None) maxjobs = MAXJOBS ret = [] remaining = [] for t in self.temp_tasks: m = getattr(t, "maxjobs", getattr(self.__class__, "maxjobs", MAXJOBS)) if m > maxjobs: remaining.append(t) elif m < maxjobs: remaining += ret ret = [t] maxjobs = m else: ret.append(t) self.temp_tasks = remaining return (maxjobs, ret) def tasks_with_inner_constraints(self): """(MAXPARALLEL) returns all tasks in this group, but add the constraints on each task instance as an optimization, it might be desirable to discard the tasks which do not have to run""" if not self.ready: self.prepare() if getattr(self, "done", None): return None for p in self.cstr_order: for v in self.cstr_order[p]: for m in self.cstr_groups[p]: for n in self.cstr_groups[v]: n.set_run_after(m) self.cstr_order = Utils.DefaultDict(set) self.cstr_groups = Utils.DefaultDict(list) self.done = 1 return self.tasks[:] # make a copy class store_task_type(type): "store the task types that have a name ending in _task into a map (remember the existing task types)" def __init__(cls, name, bases, dict): super(store_task_type, cls).__init__(name, bases, dict) name = cls.__name__ if name.endswith('_task'): name = name.replace('_task', '') if name != 'TaskBase': TaskBase.classes[name] = cls class TaskBase(object): """Base class for all Waf tasks The most important methods are (by usual order of call): 1 runnable_status: ask the task if it should be run, skipped, or if we have to ask later 2 __str__: string to display to the user 3 run: execute the task 4 post_run: after the task is run, update the cache about the task This class should be seen as an interface, it provides the very minimum necessary for the scheduler so it does not do much. 
For illustration purposes, TaskBase instances try to execute self.fun (if provided) """ __metaclass__ = store_task_type color = "GREEN" maxjobs = MAXJOBS classes = {} stat = None def __init__(self, *k, **kw): self.hasrun = NOT_RUN try: self.generator = kw['generator'] except KeyError: self.generator = self self.bld = Build.bld if kw.get('normal', 1): self.generator.bld.task_manager.add_task(self) def __repr__(self): "used for debugging" return '\n\t{task: %s %s}' % (self.__class__.__name__, str(getattr(self, "fun", ""))) def __str__(self): "string to display to the user" if hasattr(self, 'fun'): return 'executing: %s\n' % self.fun.__name__ return self.__class__.__name__ + '\n' def exec_command(self, *k, **kw): "use this for executing commands from tasks" # TODO in waf 1.6, eliminate bld.exec_command, and move the cwd processing to here if self.env['env']: kw['env'] = self.env['env'] return self.generator.bld.exec_command(*k, **kw) def runnable_status(self): "RUN_ME SKIP_ME or ASK_LATER" return RUN_ME def can_retrieve_cache(self): return False def call_run(self): if self.can_retrieve_cache(): return 0 return self.run() def run(self): "called if the task must run" if hasattr(self, 'fun'): return self.fun(self) return 0 def post_run(self): "update the dependency tree (node stats)" pass def display(self): "print either the description (using __str__) or the progress bar or the ide output" col1 = Logs.colors(self.color) col2 = Logs.colors.NORMAL if Options.options.progress_bar == 1: return self.generator.bld.progress_line(self.position[0], self.position[1], col1, col2) if Options.options.progress_bar == 2: ela = Utils.get_elapsed_time(self.generator.bld.ini) try: ins = ','.join([n.name for n in self.inputs]) except AttributeError: ins = '' try: outs = ','.join([n.name for n in self.outputs]) except AttributeError: outs = '' return '|Total %s|Current %s|Inputs %s|Outputs %s|Time %s|\n' % (self.position[1], self.position[0], ins, outs, ela) total = self.position[1] n = 
len(str(total)) fs = '[%%%dd/%%%dd] %%s%%s%%s' % (n, n) return fs % (self.position[0], self.position[1], col1, str(self), col2) def attr(self, att, default=None): "retrieve an attribute from the instance or from the class (microoptimization here)" ret = getattr(self, att, self) if ret is self: return getattr(self.__class__, att, default) return ret def hash_constraints(self): "identify a task type for all the constraints relevant for the scheduler: precedence, file production" a = self.attr sum = hash((self.__class__.__name__, str(a('before', '')), str(a('after', '')), str(a('ext_in', '')), str(a('ext_out', '')), self.__class__.maxjobs)) return sum def format_error(self): "error message to display to the user (when a build fails)" if getattr(self, "err_msg", None): return self.err_msg elif self.hasrun == CRASHED: try: return " -> task failed (err #%d): %r" % (self.err_code, self) except AttributeError: return " -> task failed: %r" % self elif self.hasrun == MISSING: return " -> missing files: %r" % self else: return '' def install(self): """ installation is performed by looking at the task attributes: * install_path: installation path like "${PREFIX}/bin" * filename: install the first node in the outputs as a file with a particular name, be certain to give os.sep * chmod: permissions """ bld = self.generator.bld d = self.attr('install') if self.attr('install_path'): lst = [a.relpath_gen(bld.srcnode) for a in self.outputs] perm = self.attr('chmod', O644) if self.attr('src'): # if src is given, install the sources too lst += [a.relpath_gen(bld.srcnode) for a in self.inputs] if self.attr('filename'): dir = self.install_path.rstrip(os.sep) + os.sep + self.attr('filename') bld.install_as(dir, lst[0], self.env, perm) else: bld.install_files(self.install_path, lst, self.env, perm) class Task(TaskBase): """The parent class is quite limited, in this version: * file system interaction: input and output nodes * persistence: do not re-execute tasks that have already run * 
caching: same files can be saved and retrieved from a cache directory * dependencies: implicit, like .c files depending on .h files explicit, like the input nodes or the dep_nodes environment variables, like the CXXFLAGS in self.env """ vars = [] def __init__(self, env, **kw): TaskBase.__init__(self, **kw) self.env = env # inputs and outputs are nodes # use setters when possible self.inputs = [] self.outputs = [] self.dep_nodes = [] self.run_after = [] # Additionally, you may define the following #self.dep_vars = 'PREFIX DATADIR' def __str__(self): "string to display to the user" env = self.env src_str = ' '.join([a.nice_path(env) for a in self.inputs]) tgt_str = ' '.join([a.nice_path(env) for a in self.outputs]) if self.outputs: sep = ' -> ' else: sep = '' return '%s: %s%s%s\n' % (self.__class__.__name__.replace('_task', ''), src_str, sep, tgt_str) def __repr__(self): return "".join(['\n\t{task: ', self.__class__.__name__, " ", ",".join([x.name for x in self.inputs]), " -> ", ",".join([x.name for x in self.outputs]), '}']) def unique_id(self): "get a unique id: hash the node paths, the variant, the class, the function" try: return self.uid except AttributeError: "this is not a real hot zone, but we want to avoid surprizes here" m = md5() up = m.update up(self.__class__.__name__) up(self.env.variant()) p = None for x in self.inputs + self.outputs: if p != x.parent.id: p = x.parent.id up(x.parent.abspath()) up(x.name) self.uid = m.digest() return self.uid def set_inputs(self, inp): if isinstance(inp, list): self.inputs += inp else: self.inputs.append(inp) def set_outputs(self, out): if isinstance(out, list): self.outputs += out else: self.outputs.append(out) def set_run_after(self, task): "set (scheduler) order on another task" # TODO: handle list or object assert isinstance(task, TaskBase) self.run_after.append(task) def add_file_dependency(self, filename): "TODO user-provided file dependencies" node = self.generator.bld.path.find_resource(filename) 
self.dep_nodes.append(node) def signature(self): # compute the result one time, and suppose the scan_signature will give the good result try: return self.cache_sig[0] except AttributeError: pass self.m = md5() # explicit deps exp_sig = self.sig_explicit_deps() # env vars var_sig = self.sig_vars() # implicit deps imp_sig = SIG_NIL if self.scan: try: imp_sig = self.sig_implicit_deps() except ValueError: return self.signature() # we now have the signature (first element) and the details (for debugging) ret = self.m.digest() self.cache_sig = (ret, exp_sig, imp_sig, var_sig) return ret def runnable_status(self): "SKIP_ME RUN_ME or ASK_LATER" #return 0 # benchmarking if self.inputs and (not self.outputs): if not getattr(self.__class__, 'quiet', None): warn("invalid task (no inputs OR outputs): override in a Task subclass or set the attribute 'quiet' %r" % self) for t in self.run_after: if not t.hasrun: return ASK_LATER env = self.env bld = self.generator.bld # first compute the signature new_sig = self.signature() # compare the signature to a signature computed previously key = self.unique_id() try: prev_sig = bld.task_sigs[key][0] except KeyError: debug("task: task %r must run as it was never run before or the task code changed", self) return RUN_ME # compare the signatures of the outputs for node in self.outputs: variant = node.variant(env) try: if bld.node_sigs[variant][node.id] != new_sig: return RUN_ME except KeyError: debug("task: task %r must run as the output nodes do not exist", self) return RUN_ME # debug if asked to if Logs.verbose: self.debug_why(bld.task_sigs[key]) if new_sig != prev_sig: return RUN_ME return SKIP_ME def post_run(self): "called after a successful task run" bld = self.generator.bld env = self.env sig = self.signature() ssig = sig.encode('hex') variant = env.variant() for node in self.outputs: # check if the node exists .. 
try: os.stat(node.abspath(env)) except OSError: self.hasrun = MISSING self.err_msg = '-> missing file: %r' % node.abspath(env) raise Utils.WafError # important, store the signature for the next run bld.node_sigs[variant][node.id] = sig bld.task_sigs[self.unique_id()] = self.cache_sig # file caching, if possible # try to avoid data corruption as much as possible if not Options.cache_global or Options.options.nocache or not self.outputs: return None if getattr(self, 'cached', None): return None dname = os.path.join(Options.cache_global, ssig) tmpdir = tempfile.mkdtemp(prefix=Options.cache_global + os.sep + 'waf') try: shutil.rmtree(dname) except: pass try: i = 0 for node in self.outputs: variant = node.variant(env) dest = os.path.join(tmpdir, str(i) + node.name) shutil.copy2(node.abspath(env), dest) i += 1 except (OSError, IOError): try: shutil.rmtree(tmpdir) except: pass else: try: os.rename(tmpdir, dname) except OSError: try: shutil.rmtree(tmpdir) except: pass else: try: os.chmod(dname, O755) except: pass def can_retrieve_cache(self): """ Retrieve build nodes from the cache update the file timestamps to help cleaning the least used entries from the cache additionally, set an attribute 'cached' to avoid re-creating the same cache files suppose there are files in cache/dir1/file1 and cache/dir2/file2 first, read the timestamp of dir1 then try to copy the files then look at the timestamp again, if it has changed, the data may have been corrupt (cache update by another process) should an exception occur, ignore the data """ if not Options.cache_global or Options.options.nocache or not self.outputs: return None env = self.env sig = self.signature() ssig = sig.encode('hex') # first try to access the cache folder for the task dname = os.path.join(Options.cache_global, ssig) try: t1 = os.stat(dname).st_mtime except OSError: return None i = 0 for node in self.outputs: variant = node.variant(env) orig = os.path.join(dname, str(i) + node.name) try: shutil.copy2(orig, 
node.abspath(env)) # mark the cache file as used recently (modified) os.utime(orig, None) except (OSError, IOError): debug('task: failed retrieving file') return None i += 1 # is it the same folder? try: t2 = os.stat(dname).st_mtime except OSError: return None if t1 != t2: return None for node in self.outputs: self.generator.bld.node_sigs[variant][node.id] = sig if Options.options.progress_bar < 1: self.generator.bld.printout('restoring from cache %r\n' % node.bldpath(env)) self.cached = True return 1 def debug_why(self, old_sigs): "explains why a task is run" new_sigs = self.cache_sig def v(x): return x.encode('hex') debug("Task %r", self) msgs = ['Task must run', '* Source file or manual dependency', '* Implicit dependency', '* Environment variable'] tmp = 'task: -> %s: %s %s' for x in xrange(len(msgs)): if (new_sigs[x] != old_sigs[x]): debug(tmp, msgs[x], v(old_sigs[x]), v(new_sigs[x])) def sig_explicit_deps(self): bld = self.generator.bld up = self.m.update # the inputs for x in self.inputs + getattr(self, 'dep_nodes', []): if not x.parent.id in bld.cache_scanned_folders: bld.rescan(x.parent) variant = x.variant(self.env) try: up(bld.node_sigs[variant][x.id]) except KeyError: raise Utils.WafError('Missing node signature for %r (required by %r)' % (x, self)) # manual dependencies, they can slow down the builds if bld.deps_man: additional_deps = bld.deps_man for x in self.inputs + self.outputs: try: d = additional_deps[x.id] except KeyError: continue for v in d: if isinstance(v, Node.Node): bld.rescan(v.parent) variant = v.variant(self.env) try: v = bld.node_sigs[variant][v.id] except KeyError: raise Utils.WafError('Missing node signature for %r (required by %r)' % (v, self)) elif hasattr(v, '__call__'): v = v() # dependency is a function, call it up(v) for x in self.dep_nodes: v = bld.node_sigs[x.variant(self.env)][x.id] up(v) return self.m.digest() def sig_vars(self): bld = self.generator.bld env = self.env # dependencies on the environment vars act_sig = 
bld.hash_env_vars(env, self.__class__.vars) self.m.update(act_sig) # additional variable dependencies, if provided dep_vars = getattr(self, 'dep_vars', None) if dep_vars: self.m.update(bld.hash_env_vars(env, dep_vars)) return self.m.digest() #def scan(self, node): # """this method returns a tuple containing: # * a list of nodes corresponding to real files # * a list of names for files not found in path_lst # the input parameters may have more parameters that the ones used below # """ # return ((), ()) scan = None # compute the signature, recompute it if there is no match in the cache def sig_implicit_deps(self): "the signature obtained may not be the one if the files have changed, we do it in two steps" bld = self.generator.bld # get the task signatures from previous runs key = self.unique_id() prev_sigs = bld.task_sigs.get(key, ()) if prev_sigs: try: # for issue #379 if prev_sigs[2] == self.compute_sig_implicit_deps(): return prev_sigs[2] except (KeyError, OSError): pass del bld.task_sigs[key] raise ValueError('rescan') # no previous run or the signature of the dependencies has changed, rescan the dependencies (nodes, names) = self.scan() if Logs.verbose: debug('deps: scanner for %s returned %s %s', str(self), str(nodes), str(names)) # store the dependencies in the cache bld.node_deps[key] = nodes bld.raw_deps[key] = names # recompute the signature and return it try: sig = self.compute_sig_implicit_deps() except KeyError: try: nodes = [] for k in bld.node_deps.get(self.unique_id(), []): if k.id & 3 == 2: # Node.FILE: if not k.id in bld.node_sigs[0]: nodes.append(k) else: if not k.id in bld.node_sigs[self.env.variant()]: nodes.append(k) except: nodes = '?' raise Utils.WafError('Missing node signature for %r (for implicit dependencies %r)' % (nodes, self)) return sig def compute_sig_implicit_deps(self): """it is intended for .cpp and inferred .h files there is a single list (no tree traversal) this is the hot spot so ... 
do not touch""" upd = self.m.update bld = self.generator.bld tstamp = bld.node_sigs env = self.env for k in bld.node_deps.get(self.unique_id(), []): # unlikely but necessary if it happens if not k.parent.id in bld.cache_scanned_folders: # if the parent folder is removed, an OSError may be thrown bld.rescan(k.parent) # if the parent folder is removed, a KeyError will be thrown if k.id & 3 == 2: # Node.FILE: upd(tstamp[0][k.id]) else: upd(tstamp[env.variant()][k.id]) return self.m.digest() def funex(c): dc = {} exec(c, dc) return dc['f'] reg_act = re.compile(r"(?P\\)|(?P\$\$)|(?P\$\{(?P\w+)(?P.*?)\})", re.M) def compile_fun_shell(name, line): """Compiles a string (once) into a function, eg: simple_task_type('c++', '${CXX} -o ${TGT[0]} ${SRC} -I ${SRC[0].parent.bldpath()}') The env variables (CXX, ..) on the task must not hold dicts (order) The reserved keywords TGT and SRC represent the task input and output nodes quick test: bld(source='wscript', rule='echo "foo\\${SRC[0].name}\\bar"') """ extr = [] def repl(match): g = match.group if g('dollar'): return "$" elif g('backslash'): return '\\\\' elif g('subst'): extr.append((g('var'), g('code'))); return "%s" return None line = reg_act.sub(repl, line) or line parm = [] dvars = [] app = parm.append for (var, meth) in extr: if var == 'SRC': if meth: app('task.inputs%s' % meth) else: app('" ".join([a.srcpath(env) for a in task.inputs])') elif var == 'TGT': if meth: app('task.outputs%s' % meth) else: app('" ".join([a.bldpath(env) for a in task.outputs])') else: if not var in dvars: dvars.append(var) app("p('%s')" % var) if parm: parm = "%% (%s) " % (',\n\t\t'.join(parm)) else: parm = '' c = COMPILE_TEMPLATE_SHELL % (line, parm) debug('action: %s', c) return (funex(c), dvars) def compile_fun_noshell(name, line): extr = [] def repl(match): g = match.group if g('dollar'): return "$" elif g('subst'): extr.append((g('var'), g('code'))); return "<<|@|>>" return None line2 = reg_act.sub(repl, line) params = line2.split('<<|@|>>') 
buf = [] dvars = [] app = buf.append for x in xrange(len(extr)): params[x] = params[x].strip() if params[x]: app("lst.extend(%r)" % params[x].split()) (var, meth) = extr[x] if var == 'SRC': if meth: app('lst.append(task.inputs%s)' % meth) else: app("lst.extend([a.srcpath(env) for a in task.inputs])") elif var == 'TGT': if meth: app('lst.append(task.outputs%s)' % meth) else: app("lst.extend([a.bldpath(env) for a in task.outputs])") else: app('lst.extend(to_list(env[%r]))' % var) if not var in dvars: dvars.append(var) if params[-1]: app("lst.extend(%r)" % shlex.split(params[-1])) fun = COMPILE_TEMPLATE_NOSHELL % "\n\t".join(buf) debug('action: %s', fun) return (funex(fun), dvars) def compile_fun(name, line, shell=None): "commands can be launched by the shell or not" if line.find('<') > 0 or line.find('>') > 0 or line.find('&&') > 0: shell = True #else: # shell = False if shell is None: if sys.platform == 'win32': shell = False else: shell = True if shell: return compile_fun_shell(name, line) else: return compile_fun_noshell(name, line) def simple_task_type(name, line, color='GREEN', vars=[], ext_in=[], ext_out=[], before=[], after=[], shell=None): """return a new Task subclass with the function run compiled from the line given""" (fun, dvars) = compile_fun(name, line, shell) fun.code = line return task_type_from_func(name, fun, vars or dvars, color, ext_in, ext_out, before, after) def task_type_from_func(name, func, vars=[], color='GREEN', ext_in=[], ext_out=[], before=[], after=[]): """return a new Task subclass with the function run compiled from the line given""" params = { 'run': func, 'vars': vars, 'color': color, 'name': name, 'ext_in': Utils.to_list(ext_in), 'ext_out': Utils.to_list(ext_out), 'before': Utils.to_list(before), 'after': Utils.to_list(after), } cls = type(Task)(name, (Task,), params) TaskBase.classes[name] = cls return cls def always_run(cls): """Set all task instances of this class to be executed whenever a build is started The task signature is 
calculated, but the result of the comparation between task signatures is bypassed """ old = cls.runnable_status def always(self): ret = old(self) if ret == SKIP_ME: return RUN_ME return ret cls.runnable_status = always def update_outputs(cls): """When a command is always run, it is possible that the output only change sometimes. By default the build node have as a hash the signature of the task which may not change. With this, the output nodes (produced) are hashed, and the hashes are set to the build nodes This may avoid unnecessary recompilations, but it uses more resources (hashing the output files) so it is not used by default """ old_post_run = cls.post_run def post_run(self): old_post_run(self) bld = self.generator.bld for output in self.outputs: bld.node_sigs[self.env.variant()][output.id] = Utils.h_file(output.abspath(self.env)) bld.task_sigs[output.id] = self.unique_id() cls.post_run = post_run old_runnable_status = cls.runnable_status def runnable_status(self): status = old_runnable_status(self) if status != RUN_ME: return status uid = self.unique_id() try: bld = self.outputs[0].__class__.bld new_sig = self.signature() prev_sig = bld.task_sigs[uid][0] if prev_sig == new_sig: for x in self.outputs: if not x.id in bld.node_sigs[self.env.variant()]: return RUN_ME if bld.task_sigs[x.id] != uid: # ensure the outputs are associated with *this* task return RUN_ME return SKIP_ME except KeyError: pass except IndexError: pass return RUN_ME cls.runnable_status = runnable_status def extract_outputs(tasks): """file_deps: Infer additional dependencies from task input and output nodes """ v = {} for x in tasks: try: (ins, outs) = v[x.env.variant()] except KeyError: ins = {} outs = {} v[x.env.variant()] = (ins, outs) for a in getattr(x, 'inputs', []): try: ins[a.id].append(x) except KeyError: ins[a.id] = [x] for a in getattr(x, 'outputs', []): try: outs[a.id].append(x) except KeyError: outs[a.id] = [x] for (ins, outs) in v.values(): links = 
set(ins.iterkeys()).intersection(outs.iterkeys()) for k in links: for a in ins[k]: for b in outs[k]: a.set_run_after(b) def extract_deps(tasks): """file_deps: Infer additional dependencies from task input and output nodes and from implicit dependencies returned by the scanners - that will only work if all tasks are created this is aimed at people who have pathological builds and who do not care enough to implement the build dependencies properly with two loops over the list of tasks, do not expect this to be really fast """ # first reuse the function above extract_outputs(tasks) # map the output nodes to the tasks producing them out_to_task = {} for x in tasks: v = x.env.variant() try: lst = x.outputs except AttributeError: pass else: for node in lst: out_to_task[(v, node.id)] = x # map the dependencies found to the tasks compiled dep_to_task = {} for x in tasks: try: x.signature() except: # this is on purpose pass v = x.env.variant() key = x.unique_id() for k in x.generator.bld.node_deps.get(x.unique_id(), []): try: dep_to_task[(v, k.id)].append(x) except KeyError: dep_to_task[(v, k.id)] = [x] # now get the intersection deps = set(dep_to_task.keys()).intersection(set(out_to_task.keys())) # and add the dependencies from task to task for idx in deps: for k in dep_to_task[idx]: k.set_run_after(out_to_task[idx]) # cleanup, remove the signatures for x in tasks: try: delattr(x, 'cache_sig') except AttributeError: pass ntdb-1.0/buildtools/wafadmin/TaskGen.py000066400000000000000000000424241224151530700201630ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2008 (ita) """ The class task_gen encapsulates the creation of task objects (low-level code) The instances can have various parameters, but the creation of task nodes (Task.py) is delayed. 
To achieve this, various methods are called from the method "apply" The class task_gen contains lots of methods, and a configuration table: * the methods to call (self.meths) can be specified dynamically (removing, adding, ..) * the order of the methods (self.prec or by default task_gen.prec) is configurable * new methods can be inserted dynamically without pasting old code Additionally, task_gen provides the method apply_core * file extensions are mapped to methods: def meth(self, name_or_node) * if a mapping is not found in self.mappings, it is searched in task_gen.mappings * when called, the functions may modify self.allnodes to re-add source to process * the mappings can map an extension or a filename (see the code below) WARNING: subclasses must reimplement the clone method """ import os, traceback, copy import Build, Task, Utils, Logs, Options from Logs import debug, error, warn from Constants import * typos = { 'sources':'source', 'targets':'target', 'include':'includes', 'define':'defines', 'importpath':'importpaths', 'install_var':'install_path', 'install_subdir':'install_path', 'inst_var':'install_path', 'inst_dir':'install_path', 'feature':'features', } class register_obj(type): """no decorators for classes, so we use a metaclass we store into task_gen.classes the classes that inherit task_gen and whose names end in '_taskgen' """ def __init__(cls, name, bases, dict): super(register_obj, cls).__init__(name, bases, dict) name = cls.__name__ suffix = '_taskgen' if name.endswith(suffix): task_gen.classes[name.replace(suffix, '')] = cls class task_gen(object): """ Most methods are of the form 'def meth(self):' without any parameters there are many of them, and they do many different things: * task creation * task results installation * environment modification * attribute addition/removal The inheritance approach is complicated * mixing several languages at once * subclassing is needed even for small changes * inserting new methods is complicated This new 
class uses a configuration table: * adding new methods easily * obtaining the order in which to call the methods * postponing the method calls (post() -> apply) Additionally, a 'traits' static attribute is provided: * this list contains methods * the methods can remove or add methods from self.meths Example1: the attribute 'staticlib' is set on an instance a method set in the list of traits is executed when the instance is posted, it finds that flag and adds another method for execution Example2: a method set in the list of traits finds the msvc compiler (from self.env['MSVC']==1); more methods are added to self.meths """ __metaclass__ = register_obj mappings = {} mapped = {} prec = Utils.DefaultDict(list) traits = Utils.DefaultDict(set) classes = {} def __init__(self, *kw, **kwargs): self.prec = Utils.DefaultDict(list) "map precedence of function names to call" # so we will have to play with directed acyclic graphs # detect cycles, etc self.source = '' self.target = '' # list of methods to execute - does not touch it by hand unless you know self.meths = [] # list of mappings extension -> function self.mappings = {} # list of features (see the documentation on traits) self.features = list(kw) # not always a good idea self.tasks = [] self.default_chmod = O644 self.default_install_path = None # kind of private, beware of what you put in it, also, the contents are consumed self.allnodes = [] self.bld = kwargs.get('bld', Build.bld) self.env = self.bld.env.copy() self.path = self.bld.path # emulate chdir when reading scripts self.name = '' # give a name to the target (static+shlib with the same targetname ambiguity) # provide a unique id self.idx = self.bld.idx[self.path.id] = self.bld.idx.get(self.path.id, 0) + 1 for key, val in kwargs.iteritems(): setattr(self, key, val) self.bld.task_manager.add_task_gen(self) self.bld.all_task_gen.append(self) def __str__(self): return ("" % (self.name or self.target, self.__class__.__name__, str(self.path))) def __setattr__(self, 
name, attr): real = typos.get(name, name) if real != name: warn('typo %s -> %s' % (name, real)) if Logs.verbose > 0: traceback.print_stack() object.__setattr__(self, real, attr) def to_list(self, value): "helper: returns a list" if isinstance(value, str): return value.split() else: return value def apply(self): "order the methods to execute using self.prec or task_gen.prec" keys = set(self.meths) # add the methods listed in the features self.features = Utils.to_list(self.features) for x in self.features + ['*']: st = task_gen.traits[x] if not st: warn('feature %r does not exist - bind at least one method to it' % x) keys.update(st) # copy the precedence table prec = {} prec_tbl = self.prec or task_gen.prec for x in prec_tbl: if x in keys: prec[x] = prec_tbl[x] # elements disconnected tmp = [] for a in keys: for x in prec.values(): if a in x: break else: tmp.append(a) # topological sort out = [] while tmp: e = tmp.pop() if e in keys: out.append(e) try: nlst = prec[e] except KeyError: pass else: del prec[e] for x in nlst: for y in prec: if x in prec[y]: break else: tmp.append(x) if prec: raise Utils.WafError("graph has a cycle %s" % str(prec)) out.reverse() self.meths = out # then we run the methods in order debug('task_gen: posting %s %d', self, id(self)) for x in out: try: v = getattr(self, x) except AttributeError: raise Utils.WafError("tried to retrieve %s which is not a valid method" % x) debug('task_gen: -> %s (%d)', x, id(self)) v() def post(self): "runs the code to create the tasks, do not subclass" if not self.name: if isinstance(self.target, list): self.name = ' '.join(self.target) else: self.name = self.target if getattr(self, 'posted', None): #error("OBJECT ALREADY POSTED" + str( self)) return self.apply() self.posted = True debug('task_gen: posted %s', self.name) def get_hook(self, ext): try: return self.mappings[ext] except KeyError: try: return task_gen.mappings[ext] except KeyError: return None # TODO waf 1.6: always set the environment # TODO waf 
1.6: create_task(self, name, inputs, outputs) def create_task(self, name, src=None, tgt=None, env=None): env = env or self.env task = Task.TaskBase.classes[name](env.copy(), generator=self) if src: task.set_inputs(src) if tgt: task.set_outputs(tgt) self.tasks.append(task) return task def name_to_obj(self, name): return self.bld.name_to_obj(name, self.env) def find_sources_in_dirs(self, dirnames, excludes=[], exts=[]): """ The attributes "excludes" and "exts" must be lists to avoid the confusion find_sources_in_dirs('a', 'b', 'c') <-> find_sources_in_dirs('a b c') do not use absolute paths do not use paths outside of the source tree the files or folder beginning by . are not returned # TODO: remove in Waf 1.6 """ err_msg = "'%s' attribute must be a list" if not isinstance(excludes, list): raise Utils.WscriptError(err_msg % 'excludes') if not isinstance(exts, list): raise Utils.WscriptError(err_msg % 'exts') lst = [] #make sure dirnames is a list helps with dirnames with spaces dirnames = self.to_list(dirnames) ext_lst = exts or list(self.mappings.keys()) + list(task_gen.mappings.keys()) for name in dirnames: anode = self.path.find_dir(name) if not anode or not anode.is_child_of(self.bld.srcnode): raise Utils.WscriptError("Unable to use '%s' - either because it's not a relative path" \ ", or it's not child of '%s'." 
% (name, self.bld.srcnode)) self.bld.rescan(anode) for name in self.bld.cache_dir_contents[anode.id]: # ignore hidden files if name.startswith('.'): continue (base, ext) = os.path.splitext(name) if ext in ext_lst and not name in lst and not name in excludes: lst.append((anode.relpath_gen(self.path) or '.') + os.path.sep + name) lst.sort() self.source = self.to_list(self.source) if not self.source: self.source = lst else: self.source += lst def clone(self, env): """when creating a clone in a task generator method, make sure to set posted=False on the clone else the other task generator will not create its tasks""" newobj = task_gen(bld=self.bld) for x in self.__dict__: if x in ['env', 'bld']: continue elif x in ["path", "features"]: setattr(newobj, x, getattr(self, x)) else: setattr(newobj, x, copy.copy(getattr(self, x))) newobj.__class__ = self.__class__ if isinstance(env, str): newobj.env = self.bld.all_envs[env].copy() else: newobj.env = env.copy() return newobj def get_inst_path(self): return getattr(self, '_install_path', getattr(self, 'default_install_path', '')) def set_inst_path(self, val): self._install_path = val install_path = property(get_inst_path, set_inst_path) def get_chmod(self): return getattr(self, '_chmod', getattr(self, 'default_chmod', O644)) def set_chmod(self, val): self._chmod = val chmod = property(get_chmod, set_chmod) def declare_extension(var, func): try: for x in Utils.to_list(var): task_gen.mappings[x] = func except: raise Utils.WscriptError('declare_extension takes either a list or a string %r' % var) task_gen.mapped[func.__name__] = func def declare_order(*k): assert(len(k) > 1) n = len(k) - 1 for i in xrange(n): f1 = k[i] f2 = k[i+1] if not f1 in task_gen.prec[f2]: task_gen.prec[f2].append(f1) def declare_chain(name='', action='', ext_in='', ext_out='', reentrant=True, color='BLUE', install=0, before=[], after=[], decider=None, rule=None, scan=None): """ see Tools/flex.py for an example while i do not like such wrappers, some people 
really do """ action = action or rule if isinstance(action, str): act = Task.simple_task_type(name, action, color=color) else: act = Task.task_type_from_func(name, action, color=color) act.ext_in = tuple(Utils.to_list(ext_in)) act.ext_out = tuple(Utils.to_list(ext_out)) act.before = Utils.to_list(before) act.after = Utils.to_list(after) act.scan = scan def x_file(self, node): if decider: ext = decider(self, node) else: ext = ext_out if isinstance(ext, str): out_source = node.change_ext(ext) if reentrant: self.allnodes.append(out_source) elif isinstance(ext, list): out_source = [node.change_ext(x) for x in ext] if reentrant: for i in xrange((reentrant is True) and len(out_source) or reentrant): self.allnodes.append(out_source[i]) else: # XXX: useless: it will fail on Utils.to_list above... raise Utils.WafError("do not know how to process %s" % str(ext)) tsk = self.create_task(name, node, out_source) if node.__class__.bld.is_install: tsk.install = install declare_extension(act.ext_in, x_file) return x_file def bind_feature(name, methods): lst = Utils.to_list(methods) task_gen.traits[name].update(lst) """ All the following decorators are registration decorators, i.e add an attribute to current class (task_gen and its derivatives), with same name as func, which points to func itself. For example: @taskgen def sayHi(self): print("hi") Now taskgen.sayHi() may be called If python were really smart, it could infer itself the order of methods by looking at the attributes. A prerequisite for execution is to have the attribute set before. Intelligent compilers binding aspect-oriented programming and parallelization, what a nice topic for studies. 
""" def taskgen(func): """ register a method as a task generator method """ setattr(task_gen, func.__name__, func) return func def feature(*k): """ declare a task generator method that will be executed when the object attribute 'feature' contains the corresponding key(s) """ def deco(func): setattr(task_gen, func.__name__, func) for name in k: task_gen.traits[name].update([func.__name__]) return func return deco def before(*k): """ declare a task generator method which will be executed before the functions of given name(s) """ def deco(func): setattr(task_gen, func.__name__, func) for fun_name in k: if not func.__name__ in task_gen.prec[fun_name]: task_gen.prec[fun_name].append(func.__name__) return func return deco def after(*k): """ declare a task generator method which will be executed after the functions of given name(s) """ def deco(func): setattr(task_gen, func.__name__, func) for fun_name in k: if not fun_name in task_gen.prec[func.__name__]: task_gen.prec[func.__name__].append(fun_name) return func return deco def extension(var): """ declare a task generator method which will be invoked during the processing of source files for the extension given """ def deco(func): setattr(task_gen, func.__name__, func) try: for x in Utils.to_list(var): task_gen.mappings[x] = func except: raise Utils.WafError('extension takes either a list or a string %r' % var) task_gen.mapped[func.__name__] = func return func return deco # TODO make certain the decorators may be used here def apply_core(self): """Process the attribute source transform the names into file nodes try to process the files by name first, later by extension""" # get the list of folders to use by the scanners # all our objects share the same include paths anyway find_resource = self.path.find_resource for filename in self.to_list(self.source): # if self.mappings or task_gen.mappings contains a file of the same name x = self.get_hook(filename) if x: x(self, filename) else: node = find_resource(filename) if not 
node: raise Utils.WafError("source not found: '%s' in '%s'" % (filename, str(self.path))) self.allnodes.append(node) for node in self.allnodes: # self.mappings or task_gen.mappings map the file extension to a function x = self.get_hook(node.suffix()) if not x: raise Utils.WafError("Cannot guess how to process %s (got mappings %r in %r) -> try conf.check_tool(..)?" % \ (str(node), self.__class__.mappings.keys(), self.__class__)) x(self, node) feature('*')(apply_core) def exec_rule(self): """Process the attribute rule, when provided the method apply_core will be disabled """ if not getattr(self, 'rule', None): return # someone may have removed it already try: self.meths.remove('apply_core') except ValueError: pass # get the function and the variables func = self.rule vars2 = [] if isinstance(func, str): # use the shell by default for user-defined commands (func, vars2) = Task.compile_fun('', self.rule, shell=getattr(self, 'shell', True)) func.code = self.rule # create the task class name = getattr(self, 'name', None) or self.target or self.rule if not isinstance(name, str): name = str(self.idx) cls = Task.task_type_from_func(name, func, getattr(self, 'vars', vars2)) cls.color = getattr(self, 'color', 'BLUE') # now create one instance tsk = self.create_task(name) dep_vars = getattr(self, 'dep_vars', ['ruledeps']) if dep_vars: tsk.dep_vars = dep_vars if isinstance(self.rule, str): tsk.env.ruledeps = self.rule else: # only works if the function is in a global module such as a waf tool tsk.env.ruledeps = Utils.h_fun(self.rule) # we assume that the user knows that without inputs or outputs #if not getattr(self, 'target', None) and not getattr(self, 'source', None): # cls.quiet = True if getattr(self, 'target', None): cls.quiet = True tsk.outputs = [self.path.find_or_declare(x) for x in self.to_list(self.target)] if getattr(self, 'source', None): cls.quiet = True tsk.inputs = [] for x in self.to_list(self.source): y = self.path.find_resource(x) if not y: raise 
Utils.WafError('input file %r could not be found (%r)' % (x, self.path.abspath())) tsk.inputs.append(y) if self.allnodes: tsk.inputs.extend(self.allnodes) if getattr(self, 'scan', None): cls.scan = self.scan if getattr(self, 'install_path', None): tsk.install_path = self.install_path if getattr(self, 'cwd', None): tsk.cwd = self.cwd if getattr(self, 'on_results', None): Task.update_outputs(cls) if getattr(self, 'always', None): Task.always_run(cls) for x in ['after', 'before', 'ext_in', 'ext_out']: setattr(cls, x, getattr(self, x, [])) feature('*')(exec_rule) before('apply_core')(exec_rule) def sequence_order(self): """ add a strict sequential constraint between the tasks generated by task generators it uses the fact that task generators are posted in order it will not post objects which belong to other folders there is also an awesome trick for executing the method in last position to use: bld(features='javac seq') bld(features='jar seq') to start a new sequence, set the attribute seq_start, for example: obj.seq_start = True """ if self.meths and self.meths[-1] != 'sequence_order': self.meths.append('sequence_order') return if getattr(self, 'seq_start', None): return # all the tasks previously declared must be run before these if getattr(self.bld, 'prev', None): self.bld.prev.post() for x in self.bld.prev.tasks: for y in self.tasks: y.set_run_after(x) self.bld.prev = self feature('seq')(sequence_order) ntdb-1.0/buildtools/wafadmin/Tools/000077500000000000000000000000001224151530700173475ustar00rootroot00000000000000ntdb-1.0/buildtools/wafadmin/Tools/__init__.py000066400000000000000000000001031224151530700214520ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006 (ita) ntdb-1.0/buildtools/wafadmin/Tools/ar.py000066400000000000000000000015351224151530700203270ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2008 (ita) # Ralf Habacker, 2006 (rh) "ar and ranlib" import os, sys import Task, 
Utils from Configure import conftest ar_str = '${AR} ${ARFLAGS} ${AR_TGT_F}${TGT} ${AR_SRC_F}${SRC}' cls = Task.simple_task_type('static_link', ar_str, color='YELLOW', ext_in='.o', ext_out='.bin', shell=False) cls.maxjobs = 1 cls.install = Utils.nada # remove the output in case it already exists old = cls.run def wrap(self): try: os.remove(self.outputs[0].abspath(self.env)) except OSError: pass return old(self) setattr(cls, 'run', wrap) def detect(conf): conf.find_program('ar', var='AR') conf.find_program('ranlib', var='RANLIB') conf.env.ARFLAGS = 'rcs' @conftest def find_ar(conf): v = conf.env conf.check_tool('ar') if not v['AR']: conf.fatal('ar is required for static libraries - not found') ntdb-1.0/buildtools/wafadmin/Tools/bison.py000066400000000000000000000020001224151530700210230ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # John O'Meara, 2006 # Thomas Nagy 2009 "Bison processing" import Task from TaskGen import extension bison = '${BISON} ${BISONFLAGS} ${SRC[0].abspath()} -o ${TGT[0].name}' cls = Task.simple_task_type('bison', bison, 'GREEN', ext_in='.yc .y .yy', ext_out='.c .cxx .h .l', shell=False) @extension(['.y', '.yc', '.yy']) def big_bison(self, node): """when it becomes complicated (unlike flex), the old recipes work better (cwd)""" has_h = '-d' in self.env['BISONFLAGS'] outs = [] if node.name.endswith('.yc'): outs.append(node.change_ext('.tab.cc')) if has_h: outs.append(node.change_ext('.tab.hh')) else: outs.append(node.change_ext('.tab.c')) if has_h: outs.append(node.change_ext('.tab.h')) tsk = self.create_task('bison', node, outs) tsk.cwd = node.bld_dir(tsk.env) # and the c/cxx file must be compiled too self.allnodes.append(outs[0]) def detect(conf): bison = conf.find_program('bison', var='BISON', mandatory=True) conf.env['BISONFLAGS'] = '-d' ntdb-1.0/buildtools/wafadmin/Tools/cc.py000066400000000000000000000055171224151530700203160ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006 
(ita) "Base for c programs/libraries" import os import TaskGen, Build, Utils, Task from Logs import debug import ccroot from TaskGen import feature, before, extension, after g_cc_flag_vars = [ 'CCDEPS', 'FRAMEWORK', 'FRAMEWORKPATH', 'STATICLIB', 'LIB', 'LIBPATH', 'LINKFLAGS', 'RPATH', 'CCFLAGS', 'CPPPATH', 'CPPFLAGS', 'CCDEFINES'] EXT_CC = ['.c'] g_cc_type_vars = ['CCFLAGS', 'LINKFLAGS'] # TODO remove in waf 1.6 class cc_taskgen(ccroot.ccroot_abstract): pass @feature('cc') @before('apply_type_vars') @after('default_cc') def init_cc(self): self.p_flag_vars = set(self.p_flag_vars).union(g_cc_flag_vars) self.p_type_vars = set(self.p_type_vars).union(g_cc_type_vars) if not self.env['CC_NAME']: raise Utils.WafError("At least one compiler (gcc, ..) must be selected") @feature('cc') @after('apply_incpaths') def apply_obj_vars_cc(self): """after apply_incpaths for INC_PATHS""" env = self.env app = env.append_unique cpppath_st = env['CPPPATH_ST'] # local flags come first # set the user-defined includes paths for i in env['INC_PATHS']: app('_CCINCFLAGS', cpppath_st % i.bldpath(env)) app('_CCINCFLAGS', cpppath_st % i.srcpath(env)) # set the library include paths for i in env['CPPPATH']: app('_CCINCFLAGS', cpppath_st % i) @feature('cc') @after('apply_lib_vars') def apply_defines_cc(self): """after uselib is set for CCDEFINES""" self.defines = getattr(self, 'defines', []) lst = self.to_list(self.defines) + self.to_list(self.env['CCDEFINES']) milst = [] # now process the local defines for defi in lst: if not defi in milst: milst.append(defi) # CCDEFINES_ libs = self.to_list(self.uselib) for l in libs: val = self.env['CCDEFINES_'+l] if val: milst += val self.env['DEFLINES'] = ["%s %s" % (x[0], Utils.trimquotes('='.join(x[1:]))) for x in [y.split('=') for y in milst]] y = self.env['CCDEFINES_ST'] self.env.append_unique('_CCDEFFLAGS', [y%x for x in milst]) @extension(EXT_CC) def c_hook(self, node): # create the compilation task: cpp or cc if getattr(self, 'obj_ext', None): obj_ext 
= self.obj_ext else: obj_ext = '_%d.o' % self.idx task = self.create_task('cc', node, node.change_ext(obj_ext)) try: self.compiled_tasks.append(task) except AttributeError: raise Utils.WafError('Have you forgotten to set the feature "cc" on %s?' % str(self)) return task cc_str = '${CC} ${CCFLAGS} ${CPPFLAGS} ${_CCINCFLAGS} ${_CCDEFFLAGS} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT}' cls = Task.simple_task_type('cc', cc_str, 'GREEN', ext_out='.o', ext_in='.c', shell=False) cls.scan = ccroot.scan cls.vars.append('CCDEPS') link_str = '${LINK_CC} ${CCLNK_SRC_F}${SRC} ${CCLNK_TGT_F}${TGT[0].abspath(env)} ${LINKFLAGS}' cls = Task.simple_task_type('cc_link', link_str, color='YELLOW', ext_in='.o', ext_out='.bin', shell=False) cls.maxjobs = 1 cls.install = Utils.nada ntdb-1.0/buildtools/wafadmin/Tools/ccroot.py000066400000000000000000000450671224151530700212260ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2008 (ita) "base for all c/c++ programs and libraries" import os, sys, re import TaskGen, Task, Utils, preproc, Logs, Build, Options from Logs import error, debug, warn from Utils import md5 from TaskGen import taskgen, after, before, feature from Constants import * from Configure import conftest try: from cStringIO import StringIO except ImportError: from io import StringIO import config_c # <- necessary for the configuration, do not touch USE_TOP_LEVEL = False def get_cc_version(conf, cc, gcc=False, icc=False): cmd = cc + ['-dM', '-E', '-'] try: p = Utils.pproc.Popen(cmd, stdin=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE) p.stdin.write('\n') out = p.communicate()[0] except: conf.fatal('could not determine the compiler version %r' % cmd) # PY3K: do not touch out = str(out) if gcc: if out.find('__INTEL_COMPILER') >= 0: conf.fatal('The intel compiler pretends to be gcc') if out.find('__GNUC__') < 0: conf.fatal('Could not determine the compiler type') if icc and out.find('__INTEL_COMPILER') < 0: conf.fatal('Not 
icc/icpc') k = {} if icc or gcc: out = out.split('\n') import shlex for line in out: lst = shlex.split(line) if len(lst)>2: key = lst[1] val = lst[2] k[key] = val def isD(var): return var in k def isT(var): return var in k and k[var] != '0' # Some documentation is available at http://predef.sourceforge.net # The names given to DEST_OS must match what Utils.unversioned_sys_platform() returns. mp1 = { '__linux__' : 'linux', '__GNU__' : 'gnu', '__FreeBSD__' : 'freebsd', '__NetBSD__' : 'netbsd', '__OpenBSD__' : 'openbsd', '__sun' : 'sunos', '__hpux' : 'hpux', '__sgi' : 'irix', '_AIX' : 'aix', '__CYGWIN__' : 'cygwin', '__MSYS__' : 'msys', '_UWIN' : 'uwin', '_WIN64' : 'win32', '_WIN32' : 'win32', '__POWERPC__' : 'powerpc', } for i in mp1: if isD(i): conf.env.DEST_OS = mp1[i] break else: if isD('__APPLE__') and isD('__MACH__'): conf.env.DEST_OS = 'darwin' elif isD('__unix__'): # unix must be tested last as it's a generic fallback conf.env.DEST_OS = 'generic' if isD('__ELF__'): conf.env.DEST_BINFMT = 'elf' elif isD('__WINNT__') or isD('__CYGWIN__'): conf.env.DEST_BINFMT = 'pe' elif isD('__APPLE__'): conf.env.DEST_BINFMT = 'mac-o' mp2 = { '__x86_64__' : 'x86_64', '__i386__' : 'x86', '__ia64__' : 'ia', '__mips__' : 'mips', '__sparc__' : 'sparc', '__alpha__' : 'alpha', '__arm__' : 'arm', '__hppa__' : 'hppa', '__powerpc__' : 'powerpc', } for i in mp2: if isD(i): conf.env.DEST_CPU = mp2[i] break debug('ccroot: dest platform: ' + ' '.join([conf.env[x] or '?' 
for x in ('DEST_OS', 'DEST_BINFMT', 'DEST_CPU')])) conf.env['CC_VERSION'] = (k['__GNUC__'], k['__GNUC_MINOR__'], k['__GNUC_PATCHLEVEL__']) return k class DEBUG_LEVELS: """Will disappear in waf 1.6""" ULTRADEBUG = "ultradebug" DEBUG = "debug" RELEASE = "release" OPTIMIZED = "optimized" CUSTOM = "custom" ALL = [ULTRADEBUG, DEBUG, RELEASE, OPTIMIZED, CUSTOM] def scan(self): "look for .h the .cpp need" debug('ccroot: _scan_preprocessor(self, node, env, path_lst)') # TODO waf 1.6 - assume the default input has exactly one file if len(self.inputs) == 1: node = self.inputs[0] (nodes, names) = preproc.get_deps(node, self.env, nodepaths = self.env['INC_PATHS']) if Logs.verbose: debug('deps: deps for %s: %r; unresolved %r', str(node), nodes, names) return (nodes, names) all_nodes = [] all_names = [] seen = set() for node in self.inputs: (nodes, names) = preproc.get_deps(node, self.env, nodepaths = self.env['INC_PATHS']) if Logs.verbose: debug('deps: deps for %s: %r; unresolved %r', str(node), nodes, names) for x in nodes: if id(x) in seen: continue seen.add(id(x)) all_nodes.append(x) for x in names: if not x in all_names: all_names.append(x) return (all_nodes, all_names) class ccroot_abstract(TaskGen.task_gen): "Parent class for programs and libraries in languages c, c++ and moc (Qt)" def __init__(self, *k, **kw): # COMPAT remove in waf 1.6 TODO if len(k) > 1: k = list(k) if k[1][0] != 'c': k[1] = 'c' + k[1] TaskGen.task_gen.__init__(self, *k, **kw) def get_target_name(self): tp = 'program' for x in self.features: if x in ['cshlib', 'cstaticlib']: tp = x.lstrip('c') pattern = self.env[tp + '_PATTERN'] if not pattern: pattern = '%s' dir, name = os.path.split(self.target) if self.env.DEST_BINFMT == 'pe' and getattr(self, 'vnum', None) and 'cshlib' in self.features: # include the version in the dll file name, # the import lib file name stays unversionned. 
name = name + '-' + self.vnum.split('.')[0] return os.path.join(dir, pattern % name) @feature('cc', 'cxx') @before('apply_core') def default_cc(self): """compiled_tasks attribute must be set before the '.c->.o' tasks can be created""" Utils.def_attrs(self, includes = '', defines= '', rpaths = '', uselib = '', uselib_local = '', add_objects = '', p_flag_vars = [], p_type_vars = [], compiled_tasks = [], link_task = None) # The only thing we need for cross-compilation is DEST_BINFMT. # At some point, we may reach a case where DEST_BINFMT is not enough, but for now it's sufficient. # Currently, cross-compilation is auto-detected only for the gnu and intel compilers. if not self.env.DEST_BINFMT: # Infer the binary format from the os name. self.env.DEST_BINFMT = Utils.unversioned_sys_platform_to_binary_format( self.env.DEST_OS or Utils.unversioned_sys_platform()) if not self.env.BINDIR: self.env.BINDIR = Utils.subst_vars('${PREFIX}/bin', self.env) if not self.env.LIBDIR: self.env.LIBDIR = Utils.subst_vars('${PREFIX}/lib${LIB_EXT}', self.env) @feature('cprogram', 'dprogram', 'cstaticlib', 'dstaticlib', 'cshlib', 'dshlib') def apply_verif(self): """no particular order, used for diagnostic""" if not (self.source or getattr(self, 'add_objects', None) or getattr(self, 'uselib_local', None) or getattr(self, 'obj_files', None)): raise Utils.WafError('no source files specified for %s' % self) if not self.target: raise Utils.WafError('no target for %s' % self) # TODO reference the d programs, shlibs in d.py, not here @feature('cprogram', 'dprogram') @after('default_cc') @before('apply_core') def vars_target_cprogram(self): self.default_install_path = self.env.BINDIR self.default_chmod = O755 @after('default_cc') @feature('cshlib', 'dshlib') @before('apply_core') def vars_target_cshlib(self): if self.env.DEST_BINFMT == 'pe': # set execute bit on libs to avoid 'permission denied' (issue 283) self.default_chmod = O755 self.default_install_path = self.env.BINDIR else: 
self.default_install_path = self.env.LIBDIR @feature('cprogram', 'dprogram', 'cstaticlib', 'dstaticlib', 'cshlib', 'dshlib') @after('apply_link', 'vars_target_cprogram', 'vars_target_cshlib') def default_link_install(self): """you may kill this method to inject your own installation for the first element any other install should only process its own nodes and not those from the others""" if self.install_path: self.bld.install_files(self.install_path, self.link_task.outputs[0], env=self.env, chmod=self.chmod) @feature('cc', 'cxx') @after('apply_type_vars', 'apply_lib_vars', 'apply_core') def apply_incpaths(self): """used by the scanner after processing the uselib for CPPPATH after apply_core because some processing may add include paths """ lst = [] # TODO move the uselib processing out of here for lib in self.to_list(self.uselib): for path in self.env['CPPPATH_' + lib]: if not path in lst: lst.append(path) if preproc.go_absolute: for path in preproc.standard_includes: if not path in lst: lst.append(path) for path in self.to_list(self.includes): if not path in lst: if preproc.go_absolute or not os.path.isabs(path): lst.append(path) else: self.env.prepend_value('CPPPATH', path) for path in lst: node = None if os.path.isabs(path): if preproc.go_absolute: node = self.bld.root.find_dir(path) elif path[0] == '#': node = self.bld.srcnode if len(path) > 1: node = node.find_dir(path[1:]) else: node = self.path.find_dir(path) if node: self.env.append_value('INC_PATHS', node) # TODO WAF 1.6 if USE_TOP_LEVEL: self.env.append_value('INC_PATHS', self.bld.srcnode) @feature('cc', 'cxx') @after('init_cc', 'init_cxx') @before('apply_lib_vars') def apply_type_vars(self): """before apply_lib_vars because we modify uselib after init_cc and init_cxx because web need p_type_vars """ for x in self.features: if not x in ['cprogram', 'cstaticlib', 'cshlib']: continue x = x.lstrip('c') # if the type defines uselib to add, add them st = self.env[x + '_USELIB'] if st: self.uselib = self.uselib 
+ ' ' + st # each compiler defines variables like 'shlib_CXXFLAGS', 'shlib_LINKFLAGS', etc # so when we make a task generator of the type shlib, CXXFLAGS are modified accordingly for var in self.p_type_vars: compvar = '%s_%s' % (x, var) #print compvar value = self.env[compvar] if value: self.env.append_value(var, value) @feature('cprogram', 'cshlib', 'cstaticlib') @after('apply_core') def apply_link(self): """executes after apply_core for collecting 'compiled_tasks' use a custom linker if specified (self.link='name-of-custom-link-task')""" link = getattr(self, 'link', None) if not link: if 'cstaticlib' in self.features: link = 'static_link' elif 'cxx' in self.features: link = 'cxx_link' else: link = 'cc_link' tsk = self.create_task(link) outputs = [t.outputs[0] for t in self.compiled_tasks] tsk.set_inputs(outputs) tsk.set_outputs(self.path.find_or_declare(get_target_name(self))) self.link_task = tsk @feature('cc', 'cxx') @after('apply_link', 'init_cc', 'init_cxx', 'apply_core') def apply_lib_vars(self): """after apply_link because of 'link_task' after default_cc because of the attribute 'uselib'""" # after 'apply_core' in case if 'cc' if there is no link env = self.env # 1. 
the case of the libs defined in the project (visit ancestors first) # the ancestors external libraries (uselib) will be prepended self.uselib = self.to_list(self.uselib) names = self.to_list(self.uselib_local) seen = set([]) tmp = Utils.deque(names) # consume a copy of the list of names while tmp: lib_name = tmp.popleft() # visit dependencies only once if lib_name in seen: continue y = self.name_to_obj(lib_name) if not y: raise Utils.WafError('object %r was not found in uselib_local (required by %r)' % (lib_name, self.name)) y.post() seen.add(lib_name) # object has ancestors to process (shared libraries): add them to the end of the list if getattr(y, 'uselib_local', None): lst = y.to_list(y.uselib_local) if 'cshlib' in y.features or 'cprogram' in y.features: lst = [x for x in lst if not 'cstaticlib' in self.name_to_obj(x).features] tmp.extend(lst) # link task and flags if getattr(y, 'link_task', None): link_name = y.target[y.target.rfind(os.sep) + 1:] if 'cstaticlib' in y.features: env.append_value('STATICLIB', link_name) elif 'cshlib' in y.features or 'cprogram' in y.features: # WARNING some linkers can link against programs env.append_value('LIB', link_name) # the order self.link_task.set_run_after(y.link_task) # for the recompilation dep_nodes = getattr(self.link_task, 'dep_nodes', []) self.link_task.dep_nodes = dep_nodes + y.link_task.outputs # add the link path too tmp_path = y.link_task.outputs[0].parent.bldpath(self.env) if not tmp_path in env['LIBPATH']: env.prepend_value('LIBPATH', tmp_path) # add ancestors uselib too - but only propagate those that have no staticlib for v in self.to_list(y.uselib): if not env['STATICLIB_' + v]: if not v in self.uselib: self.uselib.insert(0, v) # if the library task generator provides 'export_incdirs', add to the include path # the export_incdirs must be a list of paths relative to the other library if getattr(y, 'export_incdirs', None): for x in self.to_list(y.export_incdirs): node = y.path.find_dir(x) if not node: raise 
Utils.WafError('object %r: invalid folder %r in export_incdirs' % (y.target, x)) self.env.append_unique('INC_PATHS', node) # 2. the case of the libs defined outside for x in self.uselib: for v in self.p_flag_vars: val = self.env[v + '_' + x] if val: self.env.append_value(v, val) @feature('cprogram', 'cstaticlib', 'cshlib') @after('init_cc', 'init_cxx', 'apply_link') def apply_objdeps(self): "add the .o files produced by some other object files in the same manner as uselib_local" if not getattr(self, 'add_objects', None): return seen = [] names = self.to_list(self.add_objects) while names: x = names[0] # visit dependencies only once if x in seen: names = names[1:] continue # object does not exist ? y = self.name_to_obj(x) if not y: raise Utils.WafError('object %r was not found in uselib_local (required by add_objects %r)' % (x, self.name)) # object has ancestors to process first ? update the list of names if getattr(y, 'add_objects', None): added = 0 lst = y.to_list(y.add_objects) lst.reverse() for u in lst: if u in seen: continue added = 1 names = [u]+names if added: continue # list of names modified, loop # safe to process the current object y.post() seen.append(x) for t in y.compiled_tasks: self.link_task.inputs.extend(t.outputs) @feature('cprogram', 'cshlib', 'cstaticlib') @after('apply_lib_vars') def apply_obj_vars(self): """after apply_lib_vars for uselib""" v = self.env lib_st = v['LIB_ST'] staticlib_st = v['STATICLIB_ST'] libpath_st = v['LIBPATH_ST'] staticlibpath_st = v['STATICLIBPATH_ST'] rpath_st = v['RPATH_ST'] app = v.append_unique if v['FULLSTATIC']: v.append_value('LINKFLAGS', v['FULLSTATIC_MARKER']) for i in v['RPATH']: if i and rpath_st: app('LINKFLAGS', rpath_st % i) for i in v['LIBPATH']: app('LINKFLAGS', libpath_st % i) app('LINKFLAGS', staticlibpath_st % i) if v['STATICLIB']: v.append_value('LINKFLAGS', v['STATICLIB_MARKER']) k = [(staticlib_st % i) for i in v['STATICLIB']] app('LINKFLAGS', k) # fully static binaries ? 
if not v['FULLSTATIC']: if v['STATICLIB'] or v['LIB']: v.append_value('LINKFLAGS', v['SHLIB_MARKER']) app('LINKFLAGS', [lib_st % i for i in v['LIB']]) @after('apply_link') def process_obj_files(self): if not hasattr(self, 'obj_files'): return for x in self.obj_files: node = self.path.find_resource(x) self.link_task.inputs.append(node) @taskgen def add_obj_file(self, file): """Small example on how to link object files as if they were source obj = bld.create_obj('cc') obj.add_obj_file('foo.o')""" if not hasattr(self, 'obj_files'): self.obj_files = [] if not 'process_obj_files' in self.meths: self.meths.append('process_obj_files') self.obj_files.append(file) c_attrs = { 'cxxflag' : 'CXXFLAGS', 'cflag' : 'CCFLAGS', 'ccflag' : 'CCFLAGS', 'linkflag' : 'LINKFLAGS', 'ldflag' : 'LINKFLAGS', 'lib' : 'LIB', 'libpath' : 'LIBPATH', 'staticlib': 'STATICLIB', 'staticlibpath': 'STATICLIBPATH', 'rpath' : 'RPATH', 'framework' : 'FRAMEWORK', 'frameworkpath' : 'FRAMEWORKPATH' } @feature('cc', 'cxx') @before('init_cxx', 'init_cc') @before('apply_lib_vars', 'apply_obj_vars', 'apply_incpaths', 'init_cc') def add_extra_flags(self): """case and plural insensitive before apply_obj_vars for processing the library attributes """ for x in self.__dict__.keys(): y = x.lower() if y[-1] == 's': y = y[:-1] if c_attrs.get(y, None): self.env.append_unique(c_attrs[y], getattr(self, x)) # ============ the code above must not know anything about import libs ========== @feature('cshlib') @after('apply_link', 'default_cc') @before('apply_lib_vars', 'apply_objdeps', 'default_link_install') def apply_implib(self): """On mswindows, handle dlls and their import libs the .dll.a is the import lib and it is required for linking so it is installed too """ if not self.env.DEST_BINFMT == 'pe': return self.meths.remove('default_link_install') bindir = self.install_path if not bindir: return # install the dll in the bin dir dll = self.link_task.outputs[0] self.bld.install_files(bindir, dll, self.env, self.chmod) # 
add linker flags to generate the import lib implib = self.env['implib_PATTERN'] % os.path.split(self.target)[1] implib = dll.parent.find_or_declare(implib) self.link_task.outputs.append(implib) self.bld.install_as('${LIBDIR}/%s' % implib.name, implib, self.env) self.env.append_value('LINKFLAGS', (self.env['IMPLIB_ST'] % implib.bldpath(self.env)).split()) # ============ the code above must not know anything about vnum processing on unix platforms ========= @feature('cshlib') @after('apply_link') @before('apply_lib_vars', 'default_link_install') def apply_vnum(self): """ libfoo.so is installed as libfoo.so.1.2.3 """ if not getattr(self, 'vnum', '') or not 'cshlib' in self.features or os.name != 'posix' or self.env.DEST_BINFMT not in ('elf', 'mac-o'): return self.meths.remove('default_link_install') link = self.link_task nums = self.vnum.split('.') node = link.outputs[0] libname = node.name if libname.endswith('.dylib'): name3 = libname.replace('.dylib', '.%s.dylib' % self.vnum) name2 = libname.replace('.dylib', '.%s.dylib' % nums[0]) else: name3 = libname + '.' + self.vnum name2 = libname + '.' 
+ nums[0] if self.env.SONAME_ST: v = self.env.SONAME_ST % name2 self.env.append_value('LINKFLAGS', v.split()) bld = self.bld nums = self.vnum.split('.') path = self.install_path if not path: return if self.env.DEST_OS == 'openbsd': bld.install_as(path + os.sep + name2, node, env=self.env, chmod=self.link_task.chmod) else: bld.install_as(path + os.sep + name3, node, env=self.env) bld.symlink_as(path + os.sep + name2, name3) bld.symlink_as(path + os.sep + libname, name3) # the following task is just to enable execution from the build dir :-/ self.create_task('vnum', node, [node.parent.find_or_declare(name2), node.parent.find_or_declare(name3)]) def exec_vnum_link(self): for x in self.outputs: path = x.abspath(self.env) try: os.remove(path) except OSError: pass try: os.symlink(self.inputs[0].name, path) except OSError: return 1 cls = Task.task_type_from_func('vnum', func=exec_vnum_link, ext_in='.bin', color='CYAN') cls.quiet = 1 # ============ the --as-needed flag should added during the configuration, not at runtime ========= @conftest def add_as_needed(conf): if conf.env.DEST_BINFMT == 'elf' and 'gcc' in (conf.env.CXX_NAME, conf.env.CC_NAME): conf.env.append_unique('LINKFLAGS', '--as-needed') ntdb-1.0/buildtools/wafadmin/Tools/compiler_cc.py000066400000000000000000000040741224151530700222050ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Matthias Jahn jahn dôt matthias ât freenet dôt de, 2007 (pmarat) import os, sys, imp, types, ccroot import optparse import Utils, Configure, Options from Logs import debug c_compiler = { 'win32': ['msvc', 'gcc'], 'cygwin': ['gcc'], 'darwin': ['gcc'], 'aix': ['xlc', 'gcc'], 'linux': ['gcc', 'icc', 'suncc'], 'sunos': ['gcc', 'suncc'], 'irix': ['gcc'], 'hpux': ['gcc'], 'gnu': ['gcc'], 'default': ['gcc'] } def __list_possible_compiler(platform): try: return c_compiler[platform] except KeyError: return c_compiler["default"] def detect(conf): """ for each compiler for the platform, try to configure the compiler in 
theory the tools should raise a configuration error if the compiler pretends to be something it is not (setting CC=icc and trying to configure gcc) """ try: test_for_compiler = Options.options.check_c_compiler except AttributeError: conf.fatal("Add set_options(opt): opt.tool_options('compiler_cc')") orig = conf.env for compiler in test_for_compiler.split(): conf.env = orig.copy() try: conf.check_tool(compiler) except Configure.ConfigurationError, e: debug('compiler_cc: %r' % e) else: if conf.env['CC']: orig.table = conf.env.get_merged_dict() conf.env = orig conf.check_message(compiler, '', True) conf.env['COMPILER_CC'] = compiler break conf.check_message(compiler, '', False) break else: conf.fatal('could not configure a c compiler!') def set_options(opt): build_platform = Utils.unversioned_sys_platform() possible_compiler_list = __list_possible_compiler(build_platform) test_for_compiler = ' '.join(possible_compiler_list) cc_compiler_opts = opt.add_option_group("C Compiler Options") cc_compiler_opts.add_option('--check-c-compiler', default="%s" % test_for_compiler, help='On this platform (%s) the following C-Compiler will be checked by default: "%s"' % (build_platform, test_for_compiler), dest="check_c_compiler") for c_compiler in test_for_compiler.split(): opt.tool_options('%s' % c_compiler, option_group=cc_compiler_opts) ntdb-1.0/buildtools/wafadmin/Tools/compiler_cxx.py000066400000000000000000000036061224151530700224220ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Matthias Jahn jahn dôt matthias ât freenet dôt de 2007 (pmarat) import os, sys, imp, types, ccroot import optparse import Utils, Configure, Options from Logs import debug cxx_compiler = { 'win32': ['msvc', 'g++'], 'cygwin': ['g++'], 'darwin': ['g++'], 'aix': ['xlc++', 'g++'], 'linux': ['g++', 'icpc', 'sunc++'], 'sunos': ['g++', 'sunc++'], 'irix': ['g++'], 'hpux': ['g++'], 'gnu': ['g++'], 'default': ['g++'] } def __list_possible_compiler(platform): try: return 
cxx_compiler[platform] except KeyError: return cxx_compiler["default"] def detect(conf): try: test_for_compiler = Options.options.check_cxx_compiler except AttributeError: raise Configure.ConfigurationError("Add set_options(opt): opt.tool_options('compiler_cxx')") orig = conf.env for compiler in test_for_compiler.split(): try: conf.env = orig.copy() conf.check_tool(compiler) except Configure.ConfigurationError, e: debug('compiler_cxx: %r' % e) else: if conf.env['CXX']: orig.table = conf.env.get_merged_dict() conf.env = orig conf.check_message(compiler, '', True) conf.env['COMPILER_CXX'] = compiler break conf.check_message(compiler, '', False) break else: conf.fatal('could not configure a cxx compiler!') def set_options(opt): build_platform = Utils.unversioned_sys_platform() possible_compiler_list = __list_possible_compiler(build_platform) test_for_compiler = ' '.join(possible_compiler_list) cxx_compiler_opts = opt.add_option_group('C++ Compiler Options') cxx_compiler_opts.add_option('--check-cxx-compiler', default="%s" % test_for_compiler, help='On this platform (%s) the following C++ Compiler will be checked by default: "%s"' % (build_platform, test_for_compiler), dest="check_cxx_compiler") for cxx_compiler in test_for_compiler.split(): opt.tool_options('%s' % cxx_compiler, option_group=cxx_compiler_opts) ntdb-1.0/buildtools/wafadmin/Tools/compiler_d.py000066400000000000000000000015151224151530700220400ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) import os, sys, imp, types import Utils, Configure, Options def detect(conf): if getattr(Options.options, 'check_dmd_first', None): test_for_compiler = ['dmd', 'gdc'] else: test_for_compiler = ['gdc', 'dmd'] for d_compiler in test_for_compiler: try: conf.check_tool(d_compiler) except: pass else: break else: conf.fatal('no suitable d compiler was found') def set_options(opt): d_compiler_opts = opt.add_option_group('D Compiler Options') 
d_compiler_opts.add_option('--check-dmd-first', action='store_true', help='checks for the gdc compiler before dmd (default is the other way round)', dest='check_dmd_first', default=False) for d_compiler in ['gdc', 'dmd']: opt.tool_options('%s' % d_compiler, option_group=d_compiler_opts) ntdb-1.0/buildtools/wafadmin/Tools/config_c.py000066400000000000000000000463511224151530700215010ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2008 (ita) """ c/c++ configuration routines """ import os, imp, sys, shlex, shutil from Utils import md5 import Build, Utils, Configure, Task, Options, Logs, TaskGen from Constants import * from Configure import conf, conftest cfg_ver = { 'atleast-version': '>=', 'exact-version': '==', 'max-version': '<=', } SNIP1 = ''' int main() { void *p; p=(void*)(%s); return 0; } ''' SNIP2 = ''' int main() { if ((%(type_name)s *) 0) return 0; if (sizeof (%(type_name)s)) return 0; } ''' SNIP3 = ''' int main() { return 0; } ''' def parse_flags(line, uselib, env): """pkg-config still has bugs on some platforms, and there are many -config programs, parsing flags is necessary :-/""" lst = shlex.split(line) while lst: x = lst.pop(0) st = x[:2] ot = x[2:] app = env.append_value if st == '-I' or st == '/I': if not ot: ot = lst.pop(0) app('CPPPATH_' + uselib, ot) elif st == '-D': if not ot: ot = lst.pop(0) app('CXXDEFINES_' + uselib, ot) app('CCDEFINES_' + uselib, ot) elif st == '-l': if not ot: ot = lst.pop(0) app('LIB_' + uselib, ot) elif st == '-L': if not ot: ot = lst.pop(0) app('LIBPATH_' + uselib, ot) elif x == '-pthread' or x.startswith('+'): app('CCFLAGS_' + uselib, x) app('CXXFLAGS_' + uselib, x) app('LINKFLAGS_' + uselib, x) elif x == '-framework': app('FRAMEWORK_' + uselib, lst.pop(0)) elif x.startswith('-F'): app('FRAMEWORKPATH_' + uselib, x[2:]) elif x.startswith('-std'): app('CCFLAGS_' + uselib, x) app('CXXFLAGS_' + uselib, x) app('LINKFLAGS_' + uselib, x) # # NOTE on special treatment of -Wl,-R and 
-Wl,-rpath: # # It is important to not put a library provided RPATH # into the LINKFLAGS but in the RPATH instead, since # the provided LINKFLAGS get prepended to our own internal # RPATH later, and hence can potentially lead to linking # in too old versions of our internal libs. # elif x.startswith('-Wl,-R'): app('RPATH_' + uselib, x[6:]) elif x.startswith('-Wl,-rpath,'): app('RPATH_' + uselib, x[11:]) elif x.startswith('-Wl'): app('LINKFLAGS_' + uselib, x) elif x.startswith('-m') or x.startswith('-f'): app('CCFLAGS_' + uselib, x) app('CXXFLAGS_' + uselib, x) @conf def ret_msg(self, f, kw): """execute a function, when provided""" if isinstance(f, str): return f return f(kw) @conf def validate_cfg(self, kw): if not 'path' in kw: kw['path'] = 'pkg-config --errors-to-stdout --print-errors' # pkg-config version if 'atleast_pkgconfig_version' in kw: if not 'msg' in kw: kw['msg'] = 'Checking for pkg-config version >= %s' % kw['atleast_pkgconfig_version'] return # pkg-config --modversion if 'modversion' in kw: return if 'variables' in kw: if not 'msg' in kw: kw['msg'] = 'Checking for %s variables' % kw['package'] return # checking for the version of a module, for the moment, one thing at a time for x in cfg_ver.keys(): y = x.replace('-', '_') if y in kw: if not 'package' in kw: raise ValueError('%s requires a package' % x) if not 'msg' in kw: kw['msg'] = 'Checking for %s %s %s' % (kw['package'], cfg_ver[x], kw[y]) return if not 'msg' in kw: kw['msg'] = 'Checking for %s' % (kw['package'] or kw['path']) if not 'okmsg' in kw: kw['okmsg'] = 'yes' if not 'errmsg' in kw: kw['errmsg'] = 'not found' @conf def cmd_and_log(self, cmd, kw): Logs.debug('runner: %s\n' % cmd) if self.log: self.log.write('%s\n' % cmd) try: p = Utils.pproc.Popen(cmd, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE, shell=True) (out, err) = p.communicate() except OSError, e: self.log.write('error %r' % e) self.fatal(str(e)) # placeholder, don't touch out = str(out) err = str(err) if self.log: 
self.log.write(out) self.log.write(err) if p.returncode: if not kw.get('errmsg', ''): if kw.get('mandatory', False): kw['errmsg'] = out.strip() else: kw['errmsg'] = 'no' self.fatal('fail') return out @conf def exec_cfg(self, kw): # pkg-config version if 'atleast_pkgconfig_version' in kw: cmd = '%s --atleast-pkgconfig-version=%s' % (kw['path'], kw['atleast_pkgconfig_version']) self.cmd_and_log(cmd, kw) if not 'okmsg' in kw: kw['okmsg'] = 'yes' return # checking for the version of a module for x in cfg_ver: y = x.replace('-', '_') if y in kw: self.cmd_and_log('%s --%s=%s %s' % (kw['path'], x, kw[y], kw['package']), kw) if not 'okmsg' in kw: kw['okmsg'] = 'yes' self.define(self.have_define(kw.get('uselib_store', kw['package'])), 1, 0) break # retrieving the version of a module if 'modversion' in kw: version = self.cmd_and_log('%s --modversion %s' % (kw['path'], kw['modversion']), kw).strip() self.define('%s_VERSION' % Utils.quote_define_name(kw.get('uselib_store', kw['modversion'])), version) return version # retrieving variables of a module if 'variables' in kw: env = kw.get('env', self.env) uselib = kw.get('uselib_store', kw['package'].upper()) vars = Utils.to_list(kw['variables']) for v in vars: val = self.cmd_and_log('%s --variable=%s %s' % (kw['path'], v, kw['package']), kw).strip() var = '%s_%s' % (uselib, v) env[var] = val if not 'okmsg' in kw: kw['okmsg'] = 'yes' return lst = [kw['path']] defi = kw.get('define_variable', None) if not defi: defi = self.env.PKG_CONFIG_DEFINES or {} for key, val in defi.iteritems(): lst.append('--define-variable=%s=%s' % (key, val)) lst.append(kw.get('args', '')) lst.append(kw['package']) # so we assume the command-line will output flags to be parsed afterwards cmd = ' '.join(lst) ret = self.cmd_and_log(cmd, kw) if not 'okmsg' in kw: kw['okmsg'] = 'yes' self.define(self.have_define(kw.get('uselib_store', kw['package'])), 1, 0) parse_flags(ret, kw.get('uselib_store', kw['package'].upper()), kw.get('env', self.env)) return ret 
@conf def check_cfg(self, *k, **kw): """ for pkg-config mostly, but also all the -config tools conf.check_cfg(path='mpicc', args='--showme:compile --showme:link', package='', uselib_store='OPEN_MPI') conf.check_cfg(package='dbus-1', variables='system_bus_default_address session_bus_services_dir') """ self.validate_cfg(kw) if 'msg' in kw: self.check_message_1(kw['msg']) ret = None try: ret = self.exec_cfg(kw) except Configure.ConfigurationError, e: if 'errmsg' in kw: self.check_message_2(kw['errmsg'], 'YELLOW') if 'mandatory' in kw and kw['mandatory']: if Logs.verbose > 1: raise else: self.fatal('the configuration failed (see %r)' % self.log.name) else: kw['success'] = ret if 'okmsg' in kw: self.check_message_2(self.ret_msg(kw['okmsg'], kw)) return ret # the idea is the following: now that we are certain # that all the code here is only for c or c++, it is # easy to put all the logic in one function # # this should prevent code duplication (ita) # env: an optional environment (modified -> provide a copy) # compiler: cc or cxx - it tries to guess what is best # type: cprogram, cshlib, cstaticlib # code: a c code to execute # uselib_store: where to add the variables # uselib: parameters to use for building # define: define to set, like FOO in #define FOO, if not set, add /* #undef FOO */ # execute: True or False - will return the result of the execution @conf def validate_c(self, kw): """validate the parameters for the test method""" if not 'env' in kw: kw['env'] = self.env.copy() env = kw['env'] if not 'compiler' in kw: kw['compiler'] = 'cc' if env['CXX_NAME'] and Task.TaskBase.classes.get('cxx', None): kw['compiler'] = 'cxx' if not self.env['CXX']: self.fatal('a c++ compiler is required') else: if not self.env['CC']: self.fatal('a c compiler is required') if not 'type' in kw: kw['type'] = 'cprogram' assert not(kw['type'] != 'cprogram' and kw.get('execute', 0)), 'can only execute programs' #if kw['type'] != 'program' and kw.get('execute', 0): # raise ValueError, 'can 
only execute programs' def to_header(dct): if 'header_name' in dct: dct = Utils.to_list(dct['header_name']) return ''.join(['#include <%s>\n' % x for x in dct]) return '' # set the file name if not 'compile_mode' in kw: kw['compile_mode'] = (kw['compiler'] == 'cxx') and 'cxx' or 'cc' if not 'compile_filename' in kw: kw['compile_filename'] = 'test.c' + ((kw['compile_mode'] == 'cxx') and 'pp' or '') #OSX if 'framework_name' in kw: try: TaskGen.task_gen.create_task_macapp except AttributeError: self.fatal('frameworks require the osx tool') fwkname = kw['framework_name'] if not 'uselib_store' in kw: kw['uselib_store'] = fwkname.upper() if not kw.get('no_header', False): if not 'header_name' in kw: kw['header_name'] = [] fwk = '%s/%s.h' % (fwkname, fwkname) if kw.get('remove_dot_h', None): fwk = fwk[:-2] kw['header_name'] = Utils.to_list(kw['header_name']) + [fwk] kw['msg'] = 'Checking for framework %s' % fwkname kw['framework'] = fwkname #kw['frameworkpath'] = set it yourself if 'function_name' in kw: fu = kw['function_name'] if not 'msg' in kw: kw['msg'] = 'Checking for function %s' % fu kw['code'] = to_header(kw) + SNIP1 % fu if not 'uselib_store' in kw: kw['uselib_store'] = fu.upper() if not 'define_name' in kw: kw['define_name'] = self.have_define(fu) elif 'type_name' in kw: tu = kw['type_name'] if not 'msg' in kw: kw['msg'] = 'Checking for type %s' % tu if not 'header_name' in kw: kw['header_name'] = 'stdint.h' kw['code'] = to_header(kw) + SNIP2 % {'type_name' : tu} if not 'define_name' in kw: kw['define_name'] = self.have_define(tu.upper()) elif 'header_name' in kw: if not 'msg' in kw: kw['msg'] = 'Checking for header %s' % kw['header_name'] l = Utils.to_list(kw['header_name']) assert len(l)>0, 'list of headers in header_name is empty' kw['code'] = to_header(kw) + SNIP3 if not 'uselib_store' in kw: kw['uselib_store'] = l[0].upper() if not 'define_name' in kw: kw['define_name'] = self.have_define(l[0]) if 'lib' in kw: if not 'msg' in kw: kw['msg'] = 'Checking for 
library %s' % kw['lib'] if not 'uselib_store' in kw: kw['uselib_store'] = kw['lib'].upper() if 'staticlib' in kw: if not 'msg' in kw: kw['msg'] = 'Checking for static library %s' % kw['staticlib'] if not 'uselib_store' in kw: kw['uselib_store'] = kw['staticlib'].upper() if 'fragment' in kw: # an additional code fragment may be provided to replace the predefined code # in custom headers kw['code'] = kw['fragment'] if not 'msg' in kw: kw['msg'] = 'Checking for custom code' if not 'errmsg' in kw: kw['errmsg'] = 'no' for (flagsname,flagstype) in [('cxxflags','compiler'), ('cflags','compiler'), ('linkflags','linker')]: if flagsname in kw: if not 'msg' in kw: kw['msg'] = 'Checking for %s flags %s' % (flagstype, kw[flagsname]) if not 'errmsg' in kw: kw['errmsg'] = 'no' if not 'execute' in kw: kw['execute'] = False if not 'errmsg' in kw: kw['errmsg'] = 'not found' if not 'okmsg' in kw: kw['okmsg'] = 'yes' if not 'code' in kw: kw['code'] = SNIP3 if not kw.get('success'): kw['success'] = None assert 'msg' in kw, 'invalid parameters, read http://freehackers.org/~tnagy/wafbook/single.html#config_helpers_c' @conf def post_check(self, *k, **kw): "set the variables after a test was run successfully" is_success = False if kw['execute']: if kw['success'] is not None: is_success = True else: is_success = (kw['success'] == 0) if 'define_name' in kw: if 'header_name' in kw or 'function_name' in kw or 'type_name' in kw or 'fragment' in kw: if kw['execute']: key = kw['success'] if isinstance(key, str): if key: self.define(kw['define_name'], key, quote=kw.get('quote', 1)) else: self.define_cond(kw['define_name'], True) else: self.define_cond(kw['define_name'], False) else: self.define_cond(kw['define_name'], is_success) if is_success and 'uselib_store' in kw: import cc, cxx for k in set(cc.g_cc_flag_vars).union(cxx.g_cxx_flag_vars): lk = k.lower() # inconsistency: includes -> CPPPATH if k == 'CPPPATH': lk = 'includes' if k == 'CXXDEFINES': lk = 'defines' if k == 'CCDEFINES': lk = 
'defines' if lk in kw: val = kw[lk] # remove trailing slash if isinstance(val, str): val = val.rstrip(os.path.sep) self.env.append_unique(k + '_' + kw['uselib_store'], val) @conf def check(self, *k, **kw): # so this will be the generic function # it will be safer to use check_cxx or check_cc self.validate_c(kw) self.check_message_1(kw['msg']) ret = None try: ret = self.run_c_code(*k, **kw) except Configure.ConfigurationError, e: self.check_message_2(kw['errmsg'], 'YELLOW') if 'mandatory' in kw and kw['mandatory']: if Logs.verbose > 1: raise else: self.fatal('the configuration failed (see %r)' % self.log.name) else: kw['success'] = ret self.check_message_2(self.ret_msg(kw['okmsg'], kw)) self.post_check(*k, **kw) if not kw.get('execute', False): return ret == 0 return ret @conf def run_c_code(self, *k, **kw): test_f_name = kw['compile_filename'] k = 0 while k < 10000: # make certain to use a fresh folder - necessary for win32 dir = os.path.join(self.blddir, '.conf_check_%d' % k) # if the folder already exists, remove it try: shutil.rmtree(dir) except OSError: pass try: os.stat(dir) except OSError: break k += 1 try: os.makedirs(dir) except: self.fatal('cannot create a configuration test folder %r' % dir) try: os.stat(dir) except: self.fatal('cannot use the configuration test folder %r' % dir) bdir = os.path.join(dir, 'testbuild') if not os.path.exists(bdir): os.makedirs(bdir) env = kw['env'] dest = open(os.path.join(dir, test_f_name), 'w') dest.write(kw['code']) dest.close() back = os.path.abspath('.') bld = Build.BuildContext() bld.log = self.log bld.all_envs.update(self.all_envs) bld.all_envs['default'] = env bld.lst_variants = bld.all_envs.keys() bld.load_dirs(dir, bdir) os.chdir(dir) bld.rescan(bld.srcnode) if not 'features' in kw: # conf.check(features='cc cprogram pyext', ...) 
kw['features'] = [kw['compile_mode'], kw['type']] # "cprogram cc" o = bld(features=kw['features'], source=test_f_name, target='testprog') for k, v in kw.iteritems(): setattr(o, k, v) self.log.write("==>\n%s\n<==\n" % kw['code']) # compile the program try: bld.compile() except Utils.WafError: ret = Utils.ex_stack() else: ret = 0 # chdir before returning os.chdir(back) if ret: self.log.write('command returned %r' % ret) self.fatal(str(ret)) # if we need to run the program, try to get its result # keep the name of the program to execute if kw['execute']: lastprog = o.link_task.outputs[0].abspath(env) args = Utils.to_list(kw.get('exec_args', [])) proc = Utils.pproc.Popen([lastprog] + args, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE) (out, err) = proc.communicate() w = self.log.write w(str(out)) w('\n') w(str(err)) w('\n') w('returncode %r' % proc.returncode) w('\n') if proc.returncode: self.fatal(Utils.ex_stack()) ret = out return ret @conf def check_cxx(self, *k, **kw): kw['compiler'] = 'cxx' return self.check(*k, **kw) @conf def check_cc(self, *k, **kw): kw['compiler'] = 'cc' return self.check(*k, **kw) @conf def define(self, define, value, quote=1): """store a single define and its state into an internal list for later writing to a config header file. Value can only be a string or int; other types not supported. 
String values will appear properly quoted in the generated header file.""" assert define and isinstance(define, str) # ordered_dict is for writing the configuration header in order tbl = self.env[DEFINES] or Utils.ordered_dict() # the user forgot to tell if the value is quoted or not if isinstance(value, str): if quote: tbl[define] = '"%s"' % repr('"'+value)[2:-1].replace('"', '\\"') else: tbl[define] = value elif isinstance(value, int): tbl[define] = value else: raise TypeError('define %r -> %r must be a string or an int' % (define, value)) # add later to make reconfiguring faster self.env[DEFINES] = tbl self.env[define] = value # <- not certain this is necessary @conf def undefine(self, define): """store a single define and its state into an internal list for later writing to a config header file""" assert define and isinstance(define, str) tbl = self.env[DEFINES] or Utils.ordered_dict() value = UNDEFINED tbl[define] = value # add later to make reconfiguring faster self.env[DEFINES] = tbl self.env[define] = value @conf def define_cond(self, name, value): """Conditionally define a name. Formally equivalent to: if value: define(name, 1) else: undefine(name)""" if value: self.define(name, 1) else: self.undefine(name) @conf def is_defined(self, key): defines = self.env[DEFINES] if not defines: return False try: value = defines[key] except KeyError: return False else: return value != UNDEFINED @conf def get_define(self, define): "get the value of a previously stored define" try: return self.env[DEFINES][define] except KeyError: return None @conf def have_define(self, name): "prefix the define with 'HAVE_' and make sure it has valid characters." 
return self.__dict__.get('HAVE_PAT', 'HAVE_%s') % Utils.quote_define_name(name) @conf def write_config_header(self, configfile='', env='', guard='', top=False): "save the defines into a file" if not configfile: configfile = WAF_CONFIG_H waf_guard = guard or '_%s_WAF' % Utils.quote_define_name(configfile) # configfile -> absolute path # there is a good reason to concatenate first and to split afterwards if not env: env = self.env if top: diff = '' else: diff = Utils.diff_path(self.srcdir, self.curdir) full = os.sep.join([self.blddir, env.variant(), diff, configfile]) full = os.path.normpath(full) (dir, base) = os.path.split(full) try: os.makedirs(dir) except: pass dest = open(full, 'w') dest.write('/* Configuration header created by Waf - do not edit */\n') dest.write('#ifndef %s\n#define %s\n\n' % (waf_guard, waf_guard)) dest.write(self.get_config_header()) # config files are not removed on "waf clean" env.append_unique(CFG_FILES, os.path.join(diff, configfile)) dest.write('\n#endif /* %s */\n' % waf_guard) dest.close() @conf def get_config_header(self): """Fill-in the contents of the config header. 
Override when you need to write your own config header.""" config_header = [] tbl = self.env[DEFINES] or Utils.ordered_dict() for key in tbl.allkeys: value = tbl[key] if value is None: config_header.append('#define %s' % key) elif value is UNDEFINED: config_header.append('/* #undef %s */' % key) else: config_header.append('#define %s %s' % (key, value)) return "\n".join(config_header) @conftest def find_cpp(conf): v = conf.env cpp = [] if v['CPP']: cpp = v['CPP'] elif 'CPP' in conf.environ: cpp = conf.environ['CPP'] if not cpp: cpp = conf.find_program('cpp', var='CPP') #if not cpp: cpp = v['CC'] #if not cpp: cpp = v['CXX'] v['CPP'] = cpp @conftest def cc_add_flags(conf): conf.add_os_flags('CFLAGS', 'CCFLAGS') conf.add_os_flags('CPPFLAGS') @conftest def cxx_add_flags(conf): conf.add_os_flags('CXXFLAGS') conf.add_os_flags('CPPFLAGS') @conftest def link_add_flags(conf): conf.add_os_flags('LINKFLAGS') conf.add_os_flags('LDFLAGS', 'LINKFLAGS') @conftest def cc_load_tools(conf): conf.check_tool('cc') @conftest def cxx_load_tools(conf): conf.check_tool('cxx') ntdb-1.0/buildtools/wafadmin/Tools/cs.py000066400000000000000000000034131224151530700203270ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006 (ita) "C# support" import TaskGen, Utils, Task, Options from Logs import error from TaskGen import before, after, taskgen, feature flag_vars= ['FLAGS', 'ASSEMBLIES'] @feature('cs') def init_cs(self): Utils.def_attrs(self, flags = '', assemblies = '', resources = '', uselib = '') @feature('cs') @after('init_cs') def apply_uselib_cs(self): if not self.uselib: return global flag_vars for var in self.to_list(self.uselib): for v in self.flag_vars: val = self.env[v+'_'+var] if val: self.env.append_value(v, val) @feature('cs') @after('apply_uselib_cs') @before('apply_core') def apply_cs(self): try: self.meths.remove('apply_core') except ValueError: pass # process the flags for the assemblies for i in self.to_list(self.assemblies) + 
self.env['ASSEMBLIES']: self.env.append_unique('_ASSEMBLIES', '/r:'+i) # process the flags for the resources for i in self.to_list(self.resources): self.env.append_unique('_RESOURCES', '/resource:'+i) # what kind of assembly are we generating? self.env['_TYPE'] = getattr(self, 'type', 'exe') # additional flags self.env.append_unique('_FLAGS', self.to_list(self.flags)) self.env.append_unique('_FLAGS', self.env.FLAGS) # process the sources nodes = [self.path.find_resource(i) for i in self.to_list(self.source)] self.create_task('mcs', nodes, self.path.find_or_declare(self.target)) Task.simple_task_type('mcs', '${MCS} ${SRC} /target:${_TYPE} /out:${TGT} ${_FLAGS} ${_ASSEMBLIES} ${_RESOURCES}', color='YELLOW') def detect(conf): csc = getattr(Options.options, 'cscbinary', None) if csc: conf.env.MCS = csc conf.find_program(['gmcs', 'mcs'], var='MCS') def set_options(opt): opt.add_option('--with-csc-binary', type='string', dest='cscbinary') ntdb-1.0/buildtools/wafadmin/Tools/cxx.py000066400000000000000000000060701224151530700205260ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005 (ita) "Base for c++ programs and libraries" import TaskGen, Task, Utils from Logs import debug import ccroot # <- do not remove from TaskGen import feature, before, extension, after g_cxx_flag_vars = [ 'CXXDEPS', 'FRAMEWORK', 'FRAMEWORKPATH', 'STATICLIB', 'LIB', 'LIBPATH', 'LINKFLAGS', 'RPATH', 'CXXFLAGS', 'CCFLAGS', 'CPPPATH', 'CPPFLAGS', 'CXXDEFINES'] "main cpp variables" EXT_CXX = ['.cpp', '.cc', '.cxx', '.C', '.c++'] g_cxx_type_vars=['CXXFLAGS', 'LINKFLAGS'] # TODO remove in waf 1.6 class cxx_taskgen(ccroot.ccroot_abstract): pass @feature('cxx') @before('apply_type_vars') @after('default_cc') def init_cxx(self): if not 'cc' in self.features: self.mappings['.c'] = TaskGen.task_gen.mappings['.cxx'] self.p_flag_vars = set(self.p_flag_vars).union(g_cxx_flag_vars) self.p_type_vars = set(self.p_type_vars).union(g_cxx_type_vars) if not self.env['CXX_NAME']: 
raise Utils.WafError("At least one compiler (g++, ..) must be selected") @feature('cxx') @after('apply_incpaths') def apply_obj_vars_cxx(self): """after apply_incpaths for INC_PATHS""" env = self.env app = env.append_unique cxxpath_st = env['CPPPATH_ST'] # local flags come first # set the user-defined includes paths for i in env['INC_PATHS']: app('_CXXINCFLAGS', cxxpath_st % i.bldpath(env)) app('_CXXINCFLAGS', cxxpath_st % i.srcpath(env)) # set the library include paths for i in env['CPPPATH']: app('_CXXINCFLAGS', cxxpath_st % i) @feature('cxx') @after('apply_lib_vars') def apply_defines_cxx(self): """after uselib is set for CXXDEFINES""" self.defines = getattr(self, 'defines', []) lst = self.to_list(self.defines) + self.to_list(self.env['CXXDEFINES']) milst = [] # now process the local defines for defi in lst: if not defi in milst: milst.append(defi) # CXXDEFINES_USELIB libs = self.to_list(self.uselib) for l in libs: val = self.env['CXXDEFINES_'+l] if val: milst += self.to_list(val) self.env['DEFLINES'] = ["%s %s" % (x[0], Utils.trimquotes('='.join(x[1:]))) for x in [y.split('=') for y in milst]] y = self.env['CXXDEFINES_ST'] self.env.append_unique('_CXXDEFFLAGS', [y%x for x in milst]) @extension(EXT_CXX) def cxx_hook(self, node): # create the compilation task: cpp or cc if getattr(self, 'obj_ext', None): obj_ext = self.obj_ext else: obj_ext = '_%d.o' % self.idx task = self.create_task('cxx', node, node.change_ext(obj_ext)) try: self.compiled_tasks.append(task) except AttributeError: raise Utils.WafError('Have you forgotten to set the feature "cxx" on %s?' 
% str(self)) return task cxx_str = '${CXX} ${CXXFLAGS} ${CPPFLAGS} ${_CXXINCFLAGS} ${_CXXDEFFLAGS} ${CXX_SRC_F}${SRC} ${CXX_TGT_F}${TGT}' cls = Task.simple_task_type('cxx', cxx_str, color='GREEN', ext_out='.o', ext_in='.cxx', shell=False) cls.scan = ccroot.scan cls.vars.append('CXXDEPS') link_str = '${LINK_CXX} ${CXXLNK_SRC_F}${SRC} ${CXXLNK_TGT_F}${TGT[0].abspath(env)} ${LINKFLAGS}' cls = Task.simple_task_type('cxx_link', link_str, color='YELLOW', ext_in='.o', ext_out='.bin', shell=False) cls.maxjobs = 1 cls.install = Utils.nada ntdb-1.0/buildtools/wafadmin/Tools/d.py000066400000000000000000000342341224151530700201520ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) # Thomas Nagy, 2007-2008 (ita) import os, sys, re, optparse import ccroot # <- leave this import TaskGen, Utils, Task, Configure, Logs, Build from Logs import debug, error from TaskGen import taskgen, feature, after, before, extension from Configure import conftest EXT_D = ['.d', '.di', '.D'] D_METHS = ['apply_core', 'apply_vnum', 'apply_objdeps'] # additional d methods DLIB = """ version(D_Version2) { import std.stdio; int main() { writefln("phobos2"); return 0; } } else { version(Tango) { import tango.stdc.stdio; int main() { printf("tango"); return 0; } } else { import std.stdio; int main() { writefln("phobos1"); return 0; } } } """ def filter_comments(filename): txt = Utils.readf(filename) i = 0 buf = [] max = len(txt) begin = 0 while i < max: c = txt[i] if c == '"' or c == "'": # skip a string or character literal buf.append(txt[begin:i]) delim = c i += 1 while i < max: c = txt[i] if c == delim: break elif c == '\\': # skip the character following backslash i += 1 i += 1 i += 1 begin = i elif c == '/': # try to replace a comment with whitespace buf.append(txt[begin:i]) i += 1 if i == max: break c = txt[i] if c == '+': # eat nesting /+ +/ comment i += 1 nesting = 1 c = None while i < max: prev = c c = txt[i] if prev == '/' and c == '+': nesting += 
1 c = None elif prev == '+' and c == '/': nesting -= 1 if nesting == 0: break c = None i += 1 elif c == '*': # eat /* */ comment i += 1 c = None while i < max: prev = c c = txt[i] if prev == '*' and c == '/': break i += 1 elif c == '/': # eat // comment i += 1 while i < max and txt[i] != '\n': i += 1 else: # no comment begin = i - 1 continue i += 1 begin = i buf.append(' ') else: i += 1 buf.append(txt[begin:]) return buf class d_parser(object): def __init__(self, env, incpaths): #self.code = '' #self.module = '' #self.imports = [] self.allnames = [] self.re_module = re.compile("module\s+([^;]+)") self.re_import = re.compile("import\s+([^;]+)") self.re_import_bindings = re.compile("([^:]+):(.*)") self.re_import_alias = re.compile("[^=]+=(.+)") self.env = env self.nodes = [] self.names = [] self.incpaths = incpaths def tryfind(self, filename): found = 0 for n in self.incpaths: found = n.find_resource(filename.replace('.', '/') + '.d') if found: self.nodes.append(found) self.waiting.append(found) break if not found: if not filename in self.names: self.names.append(filename) def get_strings(self, code): #self.imports = [] self.module = '' lst = [] # get the module name (if present) mod_name = self.re_module.search(code) if mod_name: self.module = re.sub('\s+', '', mod_name.group(1)) # strip all whitespaces # go through the code, have a look at all import occurrences # first, lets look at anything beginning with "import" and ending with ";" import_iterator = self.re_import.finditer(code) if import_iterator: for import_match in import_iterator: import_match_str = re.sub('\s+', '', import_match.group(1)) # strip all whitespaces # does this end with an import bindings declaration? 
# (import bindings always terminate the list of imports) bindings_match = self.re_import_bindings.match(import_match_str) if bindings_match: import_match_str = bindings_match.group(1) # if so, extract the part before the ":" (since the module declaration(s) is/are located there) # split the matching string into a bunch of strings, separated by a comma matches = import_match_str.split(',') for match in matches: alias_match = self.re_import_alias.match(match) if alias_match: # is this an alias declaration? (alias = module name) if so, extract the module name match = alias_match.group(1) lst.append(match) return lst def start(self, node): self.waiting = [node] # while the stack is not empty, add the dependencies while self.waiting: nd = self.waiting.pop(0) self.iter(nd) def iter(self, node): path = node.abspath(self.env) # obtain the absolute path code = "".join(filter_comments(path)) # read the file and filter the comments names = self.get_strings(code) # obtain the import strings for x in names: # optimization if x in self.allnames: continue self.allnames.append(x) # for each name, see if it is like a node or not self.tryfind(x) def scan(self): "look for .d/.di the .d source need" env = self.env gruik = d_parser(env, env['INC_PATHS']) gruik.start(self.inputs[0]) if Logs.verbose: debug('deps: nodes found for %s: %s %s' % (str(self.inputs[0]), str(gruik.nodes), str(gruik.names))) #debug("deps found for %s: %s" % (str(node), str(gruik.deps)), 'deps') return (gruik.nodes, gruik.names) def get_target_name(self): "for d programs and libs" v = self.env tp = 'program' for x in self.features: if x in ['dshlib', 'dstaticlib']: tp = x.lstrip('d') return v['D_%s_PATTERN' % tp] % self.target d_params = { 'dflags': '', 'importpaths':'', 'libs':'', 'libpaths':'', 'generate_headers':False, } @feature('d') @before('apply_type_vars') def init_d(self): for x in d_params: setattr(self, x, getattr(self, x, d_params[x])) class d_taskgen(TaskGen.task_gen): def __init__(self, *k, **kw): 
TaskGen.task_gen.__init__(self, *k, **kw) # COMPAT if len(k) > 1: self.features.append('d' + k[1]) # okay, we borrow a few methods from ccroot TaskGen.bind_feature('d', D_METHS) @feature('d') @before('apply_d_libs') def init_d(self): Utils.def_attrs(self, dflags='', importpaths='', libs='', libpaths='', uselib='', uselib_local='', generate_headers=False, # set to true if you want .di files as well as .o compiled_tasks=[], add_objects=[], link_task=None) @feature('d') @after('apply_d_link', 'init_d') @before('apply_vnum', 'apply_d_vars') def apply_d_libs(self): """after apply_link because of 'link_task' after default_cc because of the attribute 'uselib'""" env = self.env # 1. the case of the libs defined in the project (visit ancestors first) # the ancestors external libraries (uselib) will be prepended self.uselib = self.to_list(self.uselib) names = self.to_list(self.uselib_local) seen = set([]) tmp = Utils.deque(names) # consume a copy of the list of names while tmp: lib_name = tmp.popleft() # visit dependencies only once if lib_name in seen: continue y = self.name_to_obj(lib_name) if not y: raise Utils.WafError('object %r was not found in uselib_local (required by %r)' % (lib_name, self.name)) y.post() seen.add(lib_name) # object has ancestors to process (shared libraries): add them to the end of the list if getattr(y, 'uselib_local', None): lst = y.to_list(y.uselib_local) if 'dshlib' in y.features or 'dprogram' in y.features: lst = [x for x in lst if not 'dstaticlib' in self.name_to_obj(x).features] tmp.extend(lst) # link task and flags if getattr(y, 'link_task', None): link_name = y.target[y.target.rfind(os.sep) + 1:] if 'dstaticlib' in y.features or 'dshlib' in y.features: env.append_unique('DLINKFLAGS', env.DLIB_ST % link_name) env.append_unique('DLINKFLAGS', env.DLIBPATH_ST % y.link_task.outputs[0].parent.bldpath(env)) # the order self.link_task.set_run_after(y.link_task) # for the recompilation dep_nodes = getattr(self.link_task, 'dep_nodes', []) 
self.link_task.dep_nodes = dep_nodes + y.link_task.outputs # add ancestors uselib too - but only propagate those that have no staticlib for v in self.to_list(y.uselib): if not v in self.uselib: self.uselib.insert(0, v) # if the library task generator provides 'export_incdirs', add to the include path # the export_incdirs must be a list of paths relative to the other library if getattr(y, 'export_incdirs', None): for x in self.to_list(y.export_incdirs): node = y.path.find_dir(x) if not node: raise Utils.WafError('object %r: invalid folder %r in export_incdirs' % (y.target, x)) self.env.append_unique('INC_PATHS', node) @feature('dprogram', 'dshlib', 'dstaticlib') @after('apply_core') def apply_d_link(self): link = getattr(self, 'link', None) if not link: if 'dstaticlib' in self.features: link = 'static_link' else: link = 'd_link' outputs = [t.outputs[0] for t in self.compiled_tasks] self.link_task = self.create_task(link, outputs, self.path.find_or_declare(get_target_name(self))) @feature('d') @after('apply_core') def apply_d_vars(self): env = self.env dpath_st = env['DPATH_ST'] lib_st = env['DLIB_ST'] libpath_st = env['DLIBPATH_ST'] importpaths = self.to_list(self.importpaths) libpaths = [] libs = [] uselib = self.to_list(self.uselib) for i in uselib: if env['DFLAGS_' + i]: env.append_unique('DFLAGS', env['DFLAGS_' + i]) for x in self.features: if not x in ['dprogram', 'dstaticlib', 'dshlib']: continue x.lstrip('d') d_shlib_dflags = env['D_' + x + '_DFLAGS'] if d_shlib_dflags: env.append_unique('DFLAGS', d_shlib_dflags) # add import paths for i in uselib: if env['DPATH_' + i]: for entry in self.to_list(env['DPATH_' + i]): if not entry in importpaths: importpaths.append(entry) # now process the import paths for path in importpaths: if os.path.isabs(path): env.append_unique('_DIMPORTFLAGS', dpath_st % path) else: node = self.path.find_dir(path) self.env.append_unique('INC_PATHS', node) env.append_unique('_DIMPORTFLAGS', dpath_st % node.srcpath(env)) 
env.append_unique('_DIMPORTFLAGS', dpath_st % node.bldpath(env)) # add library paths for i in uselib: if env['LIBPATH_' + i]: for entry in self.to_list(env['LIBPATH_' + i]): if not entry in libpaths: libpaths.append(entry) libpaths = self.to_list(self.libpaths) + libpaths # now process the library paths # apply same path manipulation as used with import paths for path in libpaths: if not os.path.isabs(path): node = self.path.find_resource(path) if not node: raise Utils.WafError('could not find libpath %r from %r' % (path, self)) path = node.abspath(self.env) env.append_unique('DLINKFLAGS', libpath_st % path) # add libraries for i in uselib: if env['LIB_' + i]: for entry in self.to_list(env['LIB_' + i]): if not entry in libs: libs.append(entry) libs.extend(self.to_list(self.libs)) # process user flags for flag in self.to_list(self.dflags): env.append_unique('DFLAGS', flag) # now process the libraries for lib in libs: env.append_unique('DLINKFLAGS', lib_st % lib) # add linker flags for i in uselib: dlinkflags = env['DLINKFLAGS_' + i] if dlinkflags: for linkflag in dlinkflags: env.append_unique('DLINKFLAGS', linkflag) @feature('dshlib') @after('apply_d_vars') def add_shlib_d_flags(self): for linkflag in self.env['D_shlib_LINKFLAGS']: self.env.append_unique('DLINKFLAGS', linkflag) @extension(EXT_D) def d_hook(self, node): # create the compilation task: cpp or cc task = self.create_task(self.generate_headers and 'd_with_header' or 'd') try: obj_ext = self.obj_ext except AttributeError: obj_ext = '_%d.o' % self.idx task.inputs = [node] task.outputs = [node.change_ext(obj_ext)] self.compiled_tasks.append(task) if self.generate_headers: header_node = node.change_ext(self.env['DHEADER_ext']) task.outputs += [header_node] d_str = '${D_COMPILER} ${DFLAGS} ${_DIMPORTFLAGS} ${D_SRC_F}${SRC} ${D_TGT_F}${TGT}' d_with_header_str = '${D_COMPILER} ${DFLAGS} ${_DIMPORTFLAGS} \ ${D_HDR_F}${TGT[1].bldpath(env)} \ ${D_SRC_F}${SRC} \ ${D_TGT_F}${TGT[0].bldpath(env)}' link_str = 
'${D_LINKER} ${DLNK_SRC_F}${SRC} ${DLNK_TGT_F}${TGT} ${DLINKFLAGS}' def override_exec(cls): """stupid dmd wants -of stuck to the file name""" old_exec = cls.exec_command def exec_command(self, *k, **kw): if isinstance(k[0], list): lst = k[0] for i in xrange(len(lst)): if lst[i] == '-of': del lst[i] lst[i] = '-of' + lst[i] break return old_exec(self, *k, **kw) cls.exec_command = exec_command cls = Task.simple_task_type('d', d_str, 'GREEN', before='static_link d_link', shell=False) cls.scan = scan override_exec(cls) cls = Task.simple_task_type('d_with_header', d_with_header_str, 'GREEN', before='static_link d_link', shell=False) override_exec(cls) cls = Task.simple_task_type('d_link', link_str, color='YELLOW', shell=False) override_exec(cls) # for feature request #104 @taskgen def generate_header(self, filename, install_path): if not hasattr(self, 'header_lst'): self.header_lst = [] self.meths.append('process_header') self.header_lst.append([filename, install_path]) @before('apply_core') def process_header(self): env = self.env for i in getattr(self, 'header_lst', []): node = self.path.find_resource(i[0]) if not node: raise Utils.WafError('file not found on d obj '+i[0]) task = self.create_task('d_header') task.set_inputs(node) task.set_outputs(node.change_ext('.di')) d_header_str = '${D_COMPILER} ${D_HEADER} ${SRC}' Task.simple_task_type('d_header', d_header_str, color='BLUE', shell=False) @conftest def d_platform_flags(conf): v = conf.env binfmt = v.DEST_BINFMT or Utils.unversioned_sys_platform_to_binary_format( v.DEST_OS or Utils.unversioned_sys_platform()) if binfmt == 'pe': v['D_program_PATTERN'] = '%s.exe' v['D_shlib_PATTERN'] = 'lib%s.dll' v['D_staticlib_PATTERN'] = 'lib%s.a' else: v['D_program_PATTERN'] = '%s' v['D_shlib_PATTERN'] = 'lib%s.so' v['D_staticlib_PATTERN'] = 'lib%s.a' @conftest def check_dlibrary(conf): ret = conf.check_cc(features='d dprogram', fragment=DLIB, mandatory=True, compile_filename='test.d', execute=True) conf.env.DLIBRARY = ret.strip() 
# quick test # if __name__ == "__main__": #Logs.verbose = 2 try: arg = sys.argv[1] except IndexError: arg = "file.d" print("".join(filter_comments(arg))) # TODO paths = ['.'] #gruik = filter() #gruik.start(arg) #code = "".join(gruik.buf) #print "we have found the following code" #print code #print "now parsing" #print "-------------------------------------------" """ parser_ = d_parser() parser_.start(arg) print "module: %s" % parser_.module print "imports: ", for imp in parser_.imports: print imp + " ", print """ ntdb-1.0/buildtools/wafadmin/Tools/dbus.py000066400000000000000000000017601224151530700206620ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Ali Sabil, 2007 import Task, Utils from TaskGen import taskgen, before, after, feature @taskgen def add_dbus_file(self, filename, prefix, mode): if not hasattr(self, 'dbus_lst'): self.dbus_lst = [] self.meths.append('process_dbus') self.dbus_lst.append([filename, prefix, mode]) @before('apply_core') def process_dbus(self): for filename, prefix, mode in getattr(self, 'dbus_lst', []): node = self.path.find_resource(filename) if not node: raise Utils.WafError('file not found ' + filename) tsk = self.create_task('dbus_binding_tool', node, node.change_ext('.h')) tsk.env.DBUS_BINDING_TOOL_PREFIX = prefix tsk.env.DBUS_BINDING_TOOL_MODE = mode Task.simple_task_type('dbus_binding_tool', '${DBUS_BINDING_TOOL} --prefix=${DBUS_BINDING_TOOL_PREFIX} --mode=${DBUS_BINDING_TOOL_MODE} --output=${TGT} ${SRC}', color='BLUE', before='cc') def detect(conf): dbus_binding_tool = conf.find_program('dbus-binding-tool', var='DBUS_BINDING_TOOL') ntdb-1.0/buildtools/wafadmin/Tools/dmd.py000066400000000000000000000030551224151530700204700ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) # Thomas Nagy, 2008 (ita) import sys import Utils, ar from Configure import conftest @conftest def find_dmd(conf): conf.find_program(['dmd', 'ldc'], var='D_COMPILER', mandatory=True) 
@conftest def common_flags_ldc(conf): v = conf.env v['DFLAGS'] = ['-d-version=Posix'] v['DLINKFLAGS'] = [] v['D_shlib_DFLAGS'] = ['-relocation-model=pic'] @conftest def common_flags_dmd(conf): v = conf.env # _DFLAGS _DIMPORTFLAGS # Compiler is dmd so 'gdc' part will be ignored, just # ensure key is there, so wscript can append flags to it v['DFLAGS'] = ['-version=Posix'] v['D_SRC_F'] = '' v['D_TGT_F'] = ['-c', '-of'] v['DPATH_ST'] = '-I%s' # template for adding import paths # linker v['D_LINKER'] = v['D_COMPILER'] v['DLNK_SRC_F'] = '' v['DLNK_TGT_F'] = '-of' v['DLIB_ST'] = '-L-l%s' # template for adding libs v['DLIBPATH_ST'] = '-L-L%s' # template for adding libpaths # linker debug levels v['DFLAGS_OPTIMIZED'] = ['-O'] v['DFLAGS_DEBUG'] = ['-g', '-debug'] v['DFLAGS_ULTRADEBUG'] = ['-g', '-debug'] v['DLINKFLAGS'] = ['-quiet'] v['D_shlib_DFLAGS'] = ['-fPIC'] v['D_shlib_LINKFLAGS'] = ['-L-shared'] v['DHEADER_ext'] = '.di' v['D_HDR_F'] = ['-H', '-Hf'] def detect(conf): conf.find_dmd() conf.check_tool('ar') conf.check_tool('d') conf.common_flags_dmd() conf.d_platform_flags() if conf.env.D_COMPILER.find('ldc') > -1: conf.common_flags_ldc() ntdb-1.0/buildtools/wafadmin/Tools/flex.py000066400000000000000000000007271224151530700206650ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # John O'Meara, 2006 # Thomas Nagy, 2006-2008 "Flex processing" import TaskGen def decide_ext(self, node): if 'cxx' in self.features: return '.lex.cc' else: return '.lex.c' TaskGen.declare_chain( name = 'flex', rule = '${FLEX} -o${TGT} ${FLEXFLAGS} ${SRC}', ext_in = '.l', ext_out = '.c .cxx', decider = decide_ext ) def detect(conf): conf.find_program('flex', var='FLEX', mandatory=True) conf.env['FLEXFLAGS'] = '' ntdb-1.0/buildtools/wafadmin/Tools/gas.py000066400000000000000000000021251224151530700204730ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2008 (ita) "as and gas" import os, sys import Task from TaskGen import extension, taskgen, 
after, before EXT_ASM = ['.s', '.S', '.asm', '.ASM', '.spp', '.SPP'] as_str = '${AS} ${ASFLAGS} ${_ASINCFLAGS} ${SRC} -o ${TGT}' Task.simple_task_type('asm', as_str, 'PINK', ext_out='.o', shell=False) @extension(EXT_ASM) def asm_hook(self, node): # create the compilation task: cpp or cc try: obj_ext = self.obj_ext except AttributeError: obj_ext = '_%d.o' % self.idx task = self.create_task('asm', node, node.change_ext(obj_ext)) self.compiled_tasks.append(task) self.meths.append('asm_incflags') @after('apply_obj_vars_cc') @after('apply_obj_vars_cxx') @before('apply_link') def asm_incflags(self): self.env.append_value('_ASINCFLAGS', self.env.ASINCFLAGS) var = ('cxx' in self.features) and 'CXX' or 'CC' self.env.append_value('_ASINCFLAGS', self.env['_%sINCFLAGS' % var]) def detect(conf): conf.find_program(['gas', 'as'], var='AS') if not conf.env.AS: conf.env.AS = conf.env.CC #conf.env.ASFLAGS = ['-c'] <- may be necesary for .S files ntdb-1.0/buildtools/wafadmin/Tools/gcc.py000066400000000000000000000075051224151530700204640ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2008 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 import os, sys import Configure, Options, Utils import ccroot, ar from Configure import conftest @conftest def find_gcc(conf): cc = conf.find_program(['gcc', 'cc'], var='CC', mandatory=True) cc = conf.cmd_to_list(cc) ccroot.get_cc_version(conf, cc, gcc=True) conf.env.CC_NAME = 'gcc' conf.env.CC = cc @conftest def gcc_common_flags(conf): v = conf.env # CPPFLAGS CCDEFINES _CCINCFLAGS _CCDEFFLAGS v['CCFLAGS_DEBUG'] = ['-g'] v['CCFLAGS_RELEASE'] = ['-O2'] v['CC_SRC_F'] = '' v['CC_TGT_F'] = ['-c', '-o', ''] # shell hack for -MD v['CPPPATH_ST'] = '-I%s' # template for adding include paths # linker if not v['LINK_CC']: v['LINK_CC'] = v['CC'] v['CCLNK_SRC_F'] = '' v['CCLNK_TGT_F'] = ['-o', ''] # shell hack for -MD v['LIB_ST'] = '-l%s' # template for adding libs v['LIBPATH_ST'] = '-L%s' # template for adding 
libpaths v['STATICLIB_ST'] = '-l%s' v['STATICLIBPATH_ST'] = '-L%s' v['RPATH_ST'] = '-Wl,-rpath,%s' v['CCDEFINES_ST'] = '-D%s' v['SONAME_ST'] = '-Wl,-h,%s' v['SHLIB_MARKER'] = '-Wl,-Bdynamic' v['STATICLIB_MARKER'] = '-Wl,-Bstatic' v['FULLSTATIC_MARKER'] = '-static' # program v['program_PATTERN'] = '%s' # shared library v['shlib_CCFLAGS'] = ['-fPIC', '-DPIC'] # avoid using -DPIC, -fPIC aleady defines the __PIC__ macro v['shlib_LINKFLAGS'] = ['-shared'] v['shlib_PATTERN'] = 'lib%s.so' # static lib v['staticlib_LINKFLAGS'] = ['-Wl,-Bstatic'] v['staticlib_PATTERN'] = 'lib%s.a' # osx stuff v['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup'] v['CCFLAGS_MACBUNDLE'] = ['-fPIC'] v['macbundle_PATTERN'] = '%s.bundle' @conftest def gcc_modifier_win32(conf): v = conf.env v['program_PATTERN'] = '%s.exe' v['shlib_PATTERN'] = '%s.dll' v['implib_PATTERN'] = 'lib%s.dll.a' v['IMPLIB_ST'] = '-Wl,--out-implib,%s' dest_arch = v['DEST_CPU'] v['shlib_CCFLAGS'] = ['-DPIC'] v.append_value('shlib_CCFLAGS', '-DDLL_EXPORT') # TODO adding nonstandard defines like this DLL_EXPORT is not a good idea # Auto-import is enabled by default even without this option, # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages # that the linker emits otherwise. 
v.append_value('LINKFLAGS', '-Wl,--enable-auto-import') @conftest def gcc_modifier_cygwin(conf): gcc_modifier_win32(conf) v = conf.env v['shlib_PATTERN'] = 'cyg%s.dll' v.append_value('shlib_LINKFLAGS', '-Wl,--enable-auto-image-base') @conftest def gcc_modifier_darwin(conf): v = conf.env v['shlib_CCFLAGS'] = ['-fPIC', '-compatibility_version', '1', '-current_version', '1'] v['shlib_LINKFLAGS'] = ['-dynamiclib'] v['shlib_PATTERN'] = 'lib%s.dylib' v['staticlib_LINKFLAGS'] = [] v['SHLIB_MARKER'] = '' v['STATICLIB_MARKER'] = '' v['SONAME_ST'] = '' @conftest def gcc_modifier_aix(conf): v = conf.env v['program_LINKFLAGS'] = ['-Wl,-brtl'] v['shlib_LINKFLAGS'] = ['-shared','-Wl,-brtl,-bexpfull'] v['SHLIB_MARKER'] = '' @conftest def gcc_modifier_platform(conf): # * set configurations specific for a platform. # * the destination platform is detected automatically by looking at the macros the compiler predefines, # and if it's not recognised, it fallbacks to sys.platform. dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform() gcc_modifier_func = globals().get('gcc_modifier_' + dest_os) if gcc_modifier_func: gcc_modifier_func(conf) def detect(conf): conf.find_gcc() conf.find_cpp() conf.find_ar() conf.gcc_common_flags() conf.gcc_modifier_platform() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() ntdb-1.0/buildtools/wafadmin/Tools/gdc.py000066400000000000000000000022701224151530700204570ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) import sys import Utils, ar from Configure import conftest @conftest def find_gdc(conf): conf.find_program('gdc', var='D_COMPILER', mandatory=True) @conftest def common_flags_gdc(conf): v = conf.env # _DFLAGS _DIMPORTFLAGS # for mory info about the meaning of this dict see dmd.py v['DFLAGS'] = [] v['D_SRC_F'] = '' v['D_TGT_F'] = ['-c', '-o', ''] v['DPATH_ST'] = '-I%s' # template for adding import paths # linker v['D_LINKER'] = v['D_COMPILER'] v['DLNK_SRC_F'] = '' 
v['DLNK_TGT_F'] = ['-o', ''] v['DLIB_ST'] = '-l%s' # template for adding libs v['DLIBPATH_ST'] = '-L%s' # template for adding libpaths # debug levels v['DLINKFLAGS'] = [] v['DFLAGS_OPTIMIZED'] = ['-O3'] v['DFLAGS_DEBUG'] = ['-O0'] v['DFLAGS_ULTRADEBUG'] = ['-O0'] v['D_shlib_DFLAGS'] = [] v['D_shlib_LINKFLAGS'] = ['-shared'] v['DHEADER_ext'] = '.di' v['D_HDR_F'] = '-fintfc -fintfc-file=' def detect(conf): conf.find_gdc() conf.check_tool('ar') conf.check_tool('d') conf.common_flags_gdc() conf.d_platform_flags() ntdb-1.0/buildtools/wafadmin/Tools/glib2.py000066400000000000000000000116601224151530700207240ustar00rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2008 (ita) "GLib2 support" import Task, Utils from TaskGen import taskgen, before, after, feature # # glib-genmarshal # @taskgen def add_marshal_file(self, filename, prefix): if not hasattr(self, 'marshal_list'): self.marshal_list = [] self.meths.append('process_marshal') self.marshal_list.append((filename, prefix)) @before('apply_core') def process_marshal(self): for f, prefix in getattr(self, 'marshal_list', []): node = self.path.find_resource(f) if not node: raise Utils.WafError('file not found %r' % f) h_node = node.change_ext('.h') c_node = node.change_ext('.c') task = self.create_task('glib_genmarshal', node, [h_node, c_node]) task.env.GLIB_GENMARSHAL_PREFIX = prefix self.allnodes.append(c_node) def genmarshal_func(self): bld = self.inputs[0].__class__.bld get = self.env.get_flat cmd1 = "%s %s --prefix=%s --header > %s" % ( get('GLIB_GENMARSHAL'), self.inputs[0].srcpath(self.env), get('GLIB_GENMARSHAL_PREFIX'), self.outputs[0].abspath(self.env) ) ret = bld.exec_command(cmd1) if ret: return ret #print self.outputs[1].abspath(self.env) f = open(self.outputs[1].abspath(self.env), 'wb') c = '''#include "%s"\n''' % self.outputs[0].name f.write(c) f.close() cmd2 = "%s %s --prefix=%s --body >> %s" % ( get('GLIB_GENMARSHAL'), self.inputs[0].srcpath(self.env), 
get('GLIB_GENMARSHAL_PREFIX'), self.outputs[1].abspath(self.env) ) ret = Utils.exec_command(cmd2) if ret: return ret # # glib-mkenums # @taskgen def add_enums_from_template(self, source='', target='', template='', comments=''): if not hasattr(self, 'enums_list'): self.enums_list = [] self.meths.append('process_enums') self.enums_list.append({'source': source, 'target': target, 'template': template, 'file-head': '', 'file-prod': '', 'file-tail': '', 'enum-prod': '', 'value-head': '', 'value-prod': '', 'value-tail': '', 'comments': comments}) @taskgen def add_enums(self, source='', target='', file_head='', file_prod='', file_tail='', enum_prod='', value_head='', value_prod='', value_tail='', comments=''): if not hasattr(self, 'enums_list'): self.enums_list = [] self.meths.append('process_enums') self.enums_list.append({'source': source, 'template': '', 'target': target, 'file-head': file_head, 'file-prod': file_prod, 'file-tail': file_tail, 'enum-prod': enum_prod, 'value-head': value_head, 'value-prod': value_prod, 'value-tail': value_tail, 'comments': comments}) @before('apply_core') def process_enums(self): for enum in getattr(self, 'enums_list', []): task = self.create_task('glib_mkenums') env = task.env inputs = [] # process the source source_list = self.to_list(enum['source']) if not source_list: raise Utils.WafError('missing source ' + str(enum)) source_list = [self.path.find_resource(k) for k in source_list] inputs += source_list env['GLIB_MKENUMS_SOURCE'] = [k.srcpath(env) for k in source_list] # find the target if not enum['target']: raise Utils.WafError('missing target ' + str(enum)) tgt_node = self.path.find_or_declare(enum['target']) if tgt_node.name.endswith('.c'): self.allnodes.append(tgt_node) env['GLIB_MKENUMS_TARGET'] = tgt_node.abspath(env) options = [] if enum['template']: # template, if provided template_node = self.path.find_resource(enum['template']) options.append('--template %s' % (template_node.abspath(env))) inputs.append(template_node) 
params = {'file-head' : '--fhead', 'file-prod' : '--fprod', 'file-tail' : '--ftail', 'enum-prod' : '--eprod', 'value-head' : '--vhead', 'value-prod' : '--vprod', 'value-tail' : '--vtail', 'comments': '--comments'} for param, option in params.iteritems(): if enum[param]: options.append('%s %r' % (option, enum[param])) env['GLIB_MKENUMS_OPTIONS'] = ' '.join(options) # update the task instance task.set_inputs(inputs) task.set_outputs(tgt_node) Task.task_type_from_func('glib_genmarshal', func=genmarshal_func, vars=['GLIB_GENMARSHAL_PREFIX', 'GLIB_GENMARSHAL'], color='BLUE', before='cc cxx') Task.simple_task_type('glib_mkenums', '${GLIB_MKENUMS} ${GLIB_MKENUMS_OPTIONS} ${GLIB_MKENUMS_SOURCE} > ${GLIB_MKENUMS_TARGET}', color='PINK', before='cc cxx') def detect(conf): glib_genmarshal = conf.find_program('glib-genmarshal', var='GLIB_GENMARSHAL') mk_enums_tool = conf.find_program('glib-mkenums', var='GLIB_MKENUMS') ntdb-1.0/buildtools/wafadmin/Tools/gnome.py000066400000000000000000000171011224151530700210260ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2008 (ita) "Gnome support" import os, re import TaskGen, Utils, Runner, Task, Build, Options, Logs import cc from Logs import error from TaskGen import taskgen, before, after, feature n1_regexp = re.compile('(.*)', re.M) n2_regexp = re.compile('(.*)', re.M) def postinstall_schemas(prog_name): if Build.bld.is_install: dir = Build.bld.get_install_path('${PREFIX}/etc/gconf/schemas/%s.schemas' % prog_name) if not Options.options.destdir: # add the gconf schema Utils.pprint('YELLOW', 'Installing GConf schema') command = 'gconftool-2 --install-schema-file=%s 1> /dev/null' % dir ret = Utils.exec_command(command) else: Utils.pprint('YELLOW', 'GConf schema not installed. 
After install, run this:') Utils.pprint('YELLOW', 'gconftool-2 --install-schema-file=%s' % dir) def postinstall_icons(): dir = Build.bld.get_install_path('${DATADIR}/icons/hicolor') if Build.bld.is_install: if not Options.options.destdir: # update the pixmap cache directory Utils.pprint('YELLOW', "Updating Gtk icon cache.") command = 'gtk-update-icon-cache -q -f -t %s' % dir ret = Utils.exec_command(command) else: Utils.pprint('YELLOW', 'Icon cache not updated. After install, run this:') Utils.pprint('YELLOW', 'gtk-update-icon-cache -q -f -t %s' % dir) def postinstall_scrollkeeper(prog_name): if Build.bld.is_install: # now the scrollkeeper update if we can write to the log file if os.access('/var/log/scrollkeeper.log', os.W_OK): dir1 = Build.bld.get_install_path('${PREFIX}/var/scrollkeeper') dir2 = Build.bld.get_install_path('${DATADIR}/omf/%s' % prog_name) command = 'scrollkeeper-update -q -p %s -o %s' % (dir1, dir2) ret = Utils.exec_command(command) def postinstall(prog_name='myapp', schemas=1, icons=1, scrollkeeper=1): if schemas: postinstall_schemas(prog_name) if icons: postinstall_icons() if scrollkeeper: postinstall_scrollkeeper(prog_name) # OBSOLETE class gnome_doc_taskgen(TaskGen.task_gen): def __init__(self, *k, **kw): TaskGen.task_gen.__init__(self, *k, **kw) @feature('gnome_doc') def init_gnome_doc(self): self.default_install_path = '${PREFIX}/share' @feature('gnome_doc') @after('init_gnome_doc') def apply_gnome_doc(self): self.env['APPNAME'] = self.doc_module lst = self.to_list(self.doc_linguas) bld = self.bld lst.append('C') for x in lst: if not x == 'C': tsk = self.create_task('xml2po') node = self.path.find_resource(x+'/'+x+'.po') src = self.path.find_resource('C/%s.xml' % self.doc_module) out = self.path.find_or_declare('%s/%s.xml' % (x, self.doc_module)) tsk.set_inputs([node, src]) tsk.set_outputs(out) else: out = self.path.find_resource('%s/%s.xml' % (x, self.doc_module)) tsk2 = self.create_task('xsltproc2po') out2 = 
self.path.find_or_declare('%s/%s-%s.omf' % (x, self.doc_module, x)) tsk2.set_outputs(out2) node = self.path.find_resource(self.doc_module+".omf.in") tsk2.inputs = [node, out] tsk2.run_after.append(tsk) if bld.is_install: path = self.install_path + '/gnome/help/%s/%s' % (self.doc_module, x) bld.install_files(self.install_path + '/omf', out2, env=self.env) for y in self.to_list(self.doc_figures): try: os.stat(self.path.abspath() + '/' + x + '/' + y) bld.install_as(path + '/' + y, self.path.abspath() + '/' + x + '/' + y) except: bld.install_as(path + '/' + y, self.path.abspath() + '/C/' + y) bld.install_as(path + '/%s.xml' % self.doc_module, out.abspath(self.env)) if x == 'C': xmls = self.to_list(self.doc_includes) xmls.append(self.doc_entities) for z in xmls: out = self.path.find_resource('%s/%s' % (x, z)) bld.install_as(path + '/%s' % z, out.abspath(self.env)) # OBSOLETE class xml_to_taskgen(TaskGen.task_gen): def __init__(self, *k, **kw): TaskGen.task_gen.__init__(self, *k, **kw) @feature('xml_to') def init_xml_to(self): Utils.def_attrs(self, source = 'xmlfile', xslt = 'xlsltfile', target = 'hey', default_install_path = '${PREFIX}', task_created = None) @feature('xml_to') @after('init_xml_to') def apply_xml_to(self): xmlfile = self.path.find_resource(self.source) xsltfile = self.path.find_resource(self.xslt) tsk = self.create_task('xmlto', [xmlfile, xsltfile], xmlfile.change_ext('html')) tsk.install_path = self.install_path def sgml_scan(self): node = self.inputs[0] env = self.env variant = node.variant(env) fi = open(node.abspath(env), 'r') content = fi.read() fi.close() # we should use a sgml parser :-/ name = n1_regexp.findall(content)[0] num = n2_regexp.findall(content)[0] doc_name = name+'.'+num if not self.outputs: self.outputs = [self.generator.path.find_or_declare(doc_name)] return ([], [doc_name]) class gnome_sgml2man_taskgen(TaskGen.task_gen): def __init__(self, *k, **kw): TaskGen.task_gen.__init__(self, *k, **kw) @feature('gnome_sgml2man') def 
apply_gnome_sgml2man(self): """ we could make it more complicated, but for now we just scan the document each time """ assert(getattr(self, 'appname', None)) def install_result(task): out = task.outputs[0] name = out.name ext = name[-1] env = task.env self.bld.install_files('${DATADIR}/man/man%s/' % ext, out, env) self.bld.rescan(self.path) for name in self.bld.cache_dir_contents[self.path.id]: base, ext = os.path.splitext(name) if ext != '.sgml': continue task = self.create_task('sgml2man') task.set_inputs(self.path.find_resource(name)) task.task_generator = self if self.bld.is_install: task.install = install_result # no outputs, the scanner does it # no caching for now, this is not a time-critical feature # in the future the scanner can be used to do more things (find dependencies, etc) task.scan() cls = Task.simple_task_type('sgml2man', '${SGML2MAN} -o ${TGT[0].bld_dir(env)} ${SRC} > /dev/null', color='BLUE') cls.scan = sgml_scan cls.quiet = 1 Task.simple_task_type('xmlto', '${XMLTO} html -m ${SRC[1].abspath(env)} ${SRC[0].abspath(env)}') Task.simple_task_type('xml2po', '${XML2PO} ${XML2POFLAGS} ${SRC} > ${TGT}', color='BLUE') # how do you expect someone to understand this?! 
xslt_magic = """${XSLTPROC2PO} -o ${TGT[0].abspath(env)} \ --stringparam db2omf.basename ${APPNAME} \ --stringparam db2omf.format docbook \ --stringparam db2omf.lang ${TGT[0].abspath(env)[:-4].split('-')[-1]} \ --stringparam db2omf.dtd '-//OASIS//DTD DocBook XML V4.3//EN' \ --stringparam db2omf.omf_dir ${PREFIX}/share/omf \ --stringparam db2omf.help_dir ${PREFIX}/share/gnome/help \ --stringparam db2omf.omf_in ${SRC[0].abspath(env)} \ --stringparam db2omf.scrollkeeper_cl ${SCROLLKEEPER_DATADIR}/Templates/C/scrollkeeper_cl.xml \ ${DB2OMF} ${SRC[1].abspath(env)}""" #--stringparam db2omf.dtd '-//OASIS//DTD DocBook XML V4.3//EN' \ Task.simple_task_type('xsltproc2po', xslt_magic, color='BLUE') def detect(conf): conf.check_tool('gnu_dirs glib2 dbus') sgml2man = conf.find_program('docbook2man', var='SGML2MAN') def getstr(varname): return getattr(Options.options, varname, '') # addefine also sets the variable to the env conf.define('GNOMELOCALEDIR', os.path.join(conf.env['DATADIR'], 'locale')) xml2po = conf.find_program('xml2po', var='XML2PO') xsltproc2po = conf.find_program('xsltproc', var='XSLTPROC2PO') conf.env['XML2POFLAGS'] = '-e -p' conf.env['SCROLLKEEPER_DATADIR'] = Utils.cmd_output("scrollkeeper-config --pkgdatadir", silent=1).strip() conf.env['DB2OMF'] = Utils.cmd_output("/usr/bin/pkg-config --variable db2omf gnome-doc-utils", silent=1).strip() def set_options(opt): opt.add_option('--want-rpath', type='int', default=1, dest='want_rpath', help='set rpath to 1 or 0 [Default 1]') ntdb-1.0/buildtools/wafadmin/Tools/gnu_dirs.py000066400000000000000000000100341224151530700215310ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Ali Sabil, 2007 """ To use this module do not forget to call opt.tool_options('gnu_dirs') AND conf.check_tool('gnu_dirs') Add options for the standard GNU directories, this tool will add the options found in autotools, and will update the environment with the following installation variables: * PREFIX : architecture-independent 
files [/usr/local] * EXEC_PREFIX : architecture-dependent files [PREFIX] * BINDIR : user executables [EXEC_PREFIX/bin] * SBINDIR : user executables [EXEC_PREFIX/sbin] * LIBEXECDIR : program executables [EXEC_PREFIX/libexec] * SYSCONFDIR : read-only single-machine data [PREFIX/etc] * SHAREDSTATEDIR : modifiable architecture-independent data [PREFIX/com] * LOCALSTATEDIR : modifiable single-machine data [PREFIX/var] * LIBDIR : object code libraries [EXEC_PREFIX/lib] * INCLUDEDIR : C header files [PREFIX/include] * OLDINCLUDEDIR : C header files for non-gcc [/usr/include] * DATAROOTDIR : read-only arch.-independent data root [PREFIX/share] * DATADIR : read-only architecture-independent data [DATAROOTDIR] * INFODIR : info documentation [DATAROOTDIR/info] * LOCALEDIR : locale-dependent data [DATAROOTDIR/locale] * MANDIR : man documentation [DATAROOTDIR/man] * DOCDIR : documentation root [DATAROOTDIR/doc/telepathy-glib] * HTMLDIR : html documentation [DOCDIR] * DVIDIR : dvi documentation [DOCDIR] * PDFDIR : pdf documentation [DOCDIR] * PSDIR : ps documentation [DOCDIR] """ import Utils, Options _options = [x.split(', ') for x in ''' bindir, user executables, ${EXEC_PREFIX}/bin sbindir, system admin executables, ${EXEC_PREFIX}/sbin libexecdir, program executables, ${EXEC_PREFIX}/libexec sysconfdir, read-only single-machine data, ${PREFIX}/etc sharedstatedir, modifiable architecture-independent data, ${PREFIX}/com localstatedir, modifiable single-machine data, ${PREFIX}/var libdir, object code libraries, ${EXEC_PREFIX}/lib includedir, C header files, ${PREFIX}/include oldincludedir, C header files for non-gcc, /usr/include datarootdir, read-only arch.-independent data root, ${PREFIX}/share datadir, read-only architecture-independent data, ${DATAROOTDIR} infodir, info documentation, ${DATAROOTDIR}/info localedir, locale-dependent data, ${DATAROOTDIR}/locale mandir, man documentation, ${DATAROOTDIR}/man docdir, documentation root, ${DATAROOTDIR}/doc/${PACKAGE} htmldir, html 
documentation, ${DOCDIR} dvidir, dvi documentation, ${DOCDIR} pdfdir, pdf documentation, ${DOCDIR} psdir, ps documentation, ${DOCDIR} '''.split('\n') if x] def detect(conf): def get_param(varname, default): return getattr(Options.options, varname, '') or default env = conf.env env['EXEC_PREFIX'] = get_param('EXEC_PREFIX', env['PREFIX']) env['PACKAGE'] = Utils.g_module.APPNAME complete = False iter = 0 while not complete and iter < len(_options) + 1: iter += 1 complete = True for name, help, default in _options: name = name.upper() if not env[name]: try: env[name] = Utils.subst_vars(get_param(name, default), env) except TypeError: complete = False if not complete: lst = [name for name, _, _ in _options if not env[name.upper()]] raise Utils.WafError('Variable substitution failure %r' % lst) def set_options(opt): inst_dir = opt.add_option_group('Installation directories', 'By default, "waf install" will put the files in\ "/usr/local/bin", "/usr/local/lib" etc. An installation prefix other\ than "/usr/local" can be given using "--prefix", for example "--prefix=$HOME"') for k in ('--prefix', '--destdir'): option = opt.parser.get_option(k) if option: opt.parser.remove_option(k) inst_dir.add_option(option) inst_dir.add_option('--exec-prefix', help = 'installation prefix [Default: ${PREFIX}]', default = '', dest = 'EXEC_PREFIX') dirs_options = opt.add_option_group('Pre-defined installation directories', '') for name, help, default in _options: option_name = '--' + name str_default = default str_help = '%s [Default: %s]' % (help, str_default) dirs_options.add_option(option_name, help=str_help, default='', dest=name.upper()) ntdb-1.0/buildtools/wafadmin/Tools/gob2.py000066400000000000000000000005361224151530700205560ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Ali Sabil, 2007 import TaskGen TaskGen.declare_chain( name = 'gob2', rule = '${GOB2} -o ${TGT[0].bld_dir(env)} ${GOB2FLAGS} ${SRC}', ext_in = '.gob', ext_out = '.c' ) def detect(conf): gob2 = 
conf.find_program('gob2', var='GOB2', mandatory=True) conf.env['GOB2'] = gob2 conf.env['GOB2FLAGS'] = '' ntdb-1.0/buildtools/wafadmin/Tools/gxx.py000066400000000000000000000075061224151530700205370ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 import os, sys import Configure, Options, Utils import ccroot, ar from Configure import conftest @conftest def find_gxx(conf): cxx = conf.find_program(['g++', 'c++'], var='CXX', mandatory=True) cxx = conf.cmd_to_list(cxx) ccroot.get_cc_version(conf, cxx, gcc=True) conf.env.CXX_NAME = 'gcc' conf.env.CXX = cxx @conftest def gxx_common_flags(conf): v = conf.env # CPPFLAGS CXXDEFINES _CXXINCFLAGS _CXXDEFFLAGS v['CXXFLAGS_DEBUG'] = ['-g'] v['CXXFLAGS_RELEASE'] = ['-O2'] v['CXX_SRC_F'] = '' v['CXX_TGT_F'] = ['-c', '-o', ''] # shell hack for -MD v['CPPPATH_ST'] = '-I%s' # template for adding include paths # linker if not v['LINK_CXX']: v['LINK_CXX'] = v['CXX'] v['CXXLNK_SRC_F'] = '' v['CXXLNK_TGT_F'] = ['-o', ''] # shell hack for -MD v['LIB_ST'] = '-l%s' # template for adding libs v['LIBPATH_ST'] = '-L%s' # template for adding libpaths v['STATICLIB_ST'] = '-l%s' v['STATICLIBPATH_ST'] = '-L%s' v['RPATH_ST'] = '-Wl,-rpath,%s' v['CXXDEFINES_ST'] = '-D%s' v['SONAME_ST'] = '-Wl,-h,%s' v['SHLIB_MARKER'] = '-Wl,-Bdynamic' v['STATICLIB_MARKER'] = '-Wl,-Bstatic' v['FULLSTATIC_MARKER'] = '-static' # program v['program_PATTERN'] = '%s' # shared library v['shlib_CXXFLAGS'] = ['-fPIC', '-DPIC'] # avoid using -DPIC, -fPIC aleady defines the __PIC__ macro v['shlib_LINKFLAGS'] = ['-shared'] v['shlib_PATTERN'] = 'lib%s.so' # static lib v['staticlib_LINKFLAGS'] = ['-Wl,-Bstatic'] v['staticlib_PATTERN'] = 'lib%s.a' # osx stuff v['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup'] v['CCFLAGS_MACBUNDLE'] = ['-fPIC'] v['macbundle_PATTERN'] = '%s.bundle' @conftest def gxx_modifier_win32(conf): v = conf.env v['program_PATTERN'] = '%s.exe' 
v['shlib_PATTERN'] = '%s.dll' v['implib_PATTERN'] = 'lib%s.dll.a' v['IMPLIB_ST'] = '-Wl,--out-implib,%s' dest_arch = v['DEST_CPU'] v['shlib_CXXFLAGS'] = [] v.append_value('shlib_CXXFLAGS', '-DDLL_EXPORT') # TODO adding nonstandard defines like this DLL_EXPORT is not a good idea # Auto-import is enabled by default even without this option, # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages # that the linker emits otherwise. v.append_value('LINKFLAGS', '-Wl,--enable-auto-import') @conftest def gxx_modifier_cygwin(conf): gxx_modifier_win32(conf) v = conf.env v['shlib_PATTERN'] = 'cyg%s.dll' v.append_value('shlib_LINKFLAGS', '-Wl,--enable-auto-image-base') @conftest def gxx_modifier_darwin(conf): v = conf.env v['shlib_CXXFLAGS'] = ['-fPIC', '-compatibility_version', '1', '-current_version', '1'] v['shlib_LINKFLAGS'] = ['-dynamiclib'] v['shlib_PATTERN'] = 'lib%s.dylib' v['staticlib_LINKFLAGS'] = [] v['SHLIB_MARKER'] = '' v['STATICLIB_MARKER'] = '' v['SONAME_ST'] = '' @conftest def gxx_modifier_aix(conf): v = conf.env v['program_LINKFLAGS'] = ['-Wl,-brtl'] v['shlib_LINKFLAGS'] = ['-shared', '-Wl,-brtl,-bexpfull'] v['SHLIB_MARKER'] = '' @conftest def gxx_modifier_platform(conf): # * set configurations specific for a platform. # * the destination platform is detected automatically by looking at the macros the compiler predefines, # and if it's not recognised, it fallbacks to sys.platform. 
dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform() gxx_modifier_func = globals().get('gxx_modifier_' + dest_os) if gxx_modifier_func: gxx_modifier_func(conf) def detect(conf): conf.find_gxx() conf.find_cpp() conf.find_ar() conf.gxx_common_flags() conf.gxx_modifier_platform() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() ntdb-1.0/buildtools/wafadmin/Tools/icc.py000066400000000000000000000014351224151530700204620ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Stian Selnes, 2008 # Thomas Nagy 2009 import os, sys import Configure, Options, Utils import ccroot, ar, gcc from Configure import conftest @conftest def find_icc(conf): if sys.platform == 'cygwin': conf.fatal('The Intel compiler does not work on Cygwin') v = conf.env cc = None if v['CC']: cc = v['CC'] elif 'CC' in conf.environ: cc = conf.environ['CC'] if not cc: cc = conf.find_program('icc', var='CC') if not cc: cc = conf.find_program('ICL', var='CC') if not cc: conf.fatal('Intel C Compiler (icc) was not found') cc = conf.cmd_to_list(cc) ccroot.get_cc_version(conf, cc, icc=True) v['CC'] = cc v['CC_NAME'] = 'icc' detect = ''' find_icc find_ar gcc_common_flags gcc_modifier_platform cc_load_tools cc_add_flags link_add_flags ''' ntdb-1.0/buildtools/wafadmin/Tools/icpc.py000066400000000000000000000013551224151530700206430ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy 2009 import os, sys import Configure, Options, Utils import ccroot, ar, gxx from Configure import conftest @conftest def find_icpc(conf): if sys.platform == 'cygwin': conf.fatal('The Intel compiler does not work on Cygwin') v = conf.env cxx = None if v['CXX']: cxx = v['CXX'] elif 'CXX' in conf.environ: cxx = conf.environ['CXX'] if not cxx: cxx = conf.find_program('icpc', var='CXX') if not cxx: conf.fatal('Intel C++ Compiler (icpc) was not found') cxx = conf.cmd_to_list(cxx) ccroot.get_cc_version(conf, cxx, icc=True) v['CXX'] = cxx v['CXX_NAME'] = 'icc' detect = 
''' find_icpc find_ar gxx_common_flags gxx_modifier_platform cxx_load_tools cxx_add_flags link_add_flags ''' ntdb-1.0/buildtools/wafadmin/Tools/intltool.py000066400000000000000000000112411224151530700215640ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006 (ita) "intltool support" import os, re import Configure, TaskGen, Task, Utils, Runner, Options, Build, config_c from TaskGen import feature, before, taskgen from Logs import error """ Usage: bld(features='intltool_in', source='a.po b.po', podir='po', cache='.intlcache', flags='') """ class intltool_in_taskgen(TaskGen.task_gen): """deprecated""" def __init__(self, *k, **kw): TaskGen.task_gen.__init__(self, *k, **kw) @before('apply_core') @feature('intltool_in') def iapply_intltool_in_f(self): try: self.meths.remove('apply_core') except ValueError: pass for i in self.to_list(self.source): node = self.path.find_resource(i) podir = getattr(self, 'podir', 'po') podirnode = self.path.find_dir(podir) if not podirnode: error("could not find the podir %r" % podir) continue cache = getattr(self, 'intlcache', '.intlcache') self.env['INTLCACHE'] = os.path.join(self.path.bldpath(self.env), podir, cache) self.env['INTLPODIR'] = podirnode.srcpath(self.env) self.env['INTLFLAGS'] = getattr(self, 'flags', ['-q', '-u', '-c']) task = self.create_task('intltool', node, node.change_ext('')) task.install_path = self.install_path class intltool_po_taskgen(TaskGen.task_gen): """deprecated""" def __init__(self, *k, **kw): TaskGen.task_gen.__init__(self, *k, **kw) @feature('intltool_po') def apply_intltool_po(self): try: self.meths.remove('apply_core') except ValueError: pass self.default_install_path = '${LOCALEDIR}' appname = getattr(self, 'appname', 'set_your_app_name') podir = getattr(self, 'podir', '') def install_translation(task): out = task.outputs[0] filename = out.name (langname, ext) = os.path.splitext(filename) inst_file = langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo' 
self.bld.install_as(os.path.join(self.install_path, inst_file), out, self.env, self.chmod) linguas = self.path.find_resource(os.path.join(podir, 'LINGUAS')) if linguas: # scan LINGUAS file for locales to process file = open(linguas.abspath()) langs = [] for line in file.readlines(): # ignore lines containing comments if not line.startswith('#'): langs += line.split() file.close() re_linguas = re.compile('[-a-zA-Z_@.]+') for lang in langs: # Make sure that we only process lines which contain locales if re_linguas.match(lang): node = self.path.find_resource(os.path.join(podir, re_linguas.match(lang).group() + '.po')) task = self.create_task('po') task.set_inputs(node) task.set_outputs(node.change_ext('.mo')) if self.bld.is_install: task.install = install_translation else: Utils.pprint('RED', "Error no LINGUAS file found in po directory") Task.simple_task_type('po', '${POCOM} -o ${TGT} ${SRC}', color='BLUE', shell=False) Task.simple_task_type('intltool', '${INTLTOOL} ${INTLFLAGS} ${INTLCACHE} ${INTLPODIR} ${SRC} ${TGT}', color='BLUE', after="cc_link cxx_link", shell=False) def detect(conf): pocom = conf.find_program('msgfmt') if not pocom: # if msgfmt should not be mandatory, catch the thrown exception in your wscript conf.fatal('The program msgfmt (gettext) is mandatory!') conf.env['POCOM'] = pocom # NOTE: it is possible to set INTLTOOL in the environment, but it must not have spaces in it intltool = conf.find_program('intltool-merge', var='INTLTOOL') if not intltool: # if intltool-merge should not be mandatory, catch the thrown exception in your wscript if Options.platform == 'win32': perl = conf.find_program('perl', var='PERL') if not perl: conf.fatal('The program perl (required by intltool) could not be found') intltooldir = Configure.find_file('intltool-merge', os.environ['PATH'].split(os.pathsep)) if not intltooldir: conf.fatal('The program intltool-merge (intltool, gettext-devel) is mandatory!') conf.env['INTLTOOL'] = Utils.to_list(conf.env['PERL']) + 
[intltooldir + os.sep + 'intltool-merge'] conf.check_message('intltool', '', True, ' '.join(conf.env['INTLTOOL'])) else: conf.fatal('The program intltool-merge (intltool, gettext-devel) is mandatory!') def getstr(varname): return getattr(Options.options, varname, '') prefix = conf.env['PREFIX'] datadir = getstr('datadir') if not datadir: datadir = os.path.join(prefix,'share') conf.define('LOCALEDIR', os.path.join(datadir, 'locale')) conf.define('DATADIR', datadir) if conf.env['CC'] or conf.env['CXX']: # Define to 1 if is present conf.check(header_name='locale.h') def set_options(opt): opt.add_option('--want-rpath', type='int', default=1, dest='want_rpath', help='set rpath to 1 or 0 [Default 1]') opt.add_option('--datadir', type='string', default='', dest='datadir', help='read-only application data') ntdb-1.0/buildtools/wafadmin/Tools/javaw.py000066400000000000000000000163211224151530700210340ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2008 (ita) """ Java support Javac is one of the few compilers that behaves very badly: * it outputs files where it wants to (-d is only for the package root) * it recompiles files silently behind your back * it outputs an undefined amount of files (inner classes) Fortunately, the convention makes it possible to use the build dir without too many problems for the moment Inner classes must be located and cleaned when a problem arise, for the moment waf does not track the production of inner classes. Adding all the files to a task and executing it if any of the input files change is only annoying for the compilation times Compilation can be run using Jython[1] rather than regular Python. 
Instead of running one of the following commands: ./waf configure python waf configure You would have to run: java -jar /path/to/jython.jar waf configure [1] http://www.jython.org/ """ import os, re from Configure import conf import TaskGen, Task, Utils, Options, Build from TaskGen import feature, before, taskgen class_check_source = ''' public class Test { public static void main(String[] argv) { Class lib; if (argv.length < 1) { System.err.println("Missing argument"); System.exit(77); } try { lib = Class.forName(argv[0]); } catch (ClassNotFoundException e) { System.err.println("ClassNotFoundException"); System.exit(1); } lib = null; System.exit(0); } } ''' @feature('jar') @before('apply_core') def jar_files(self): basedir = getattr(self, 'basedir', '.') destfile = getattr(self, 'destfile', 'test.jar') jaropts = getattr(self, 'jaropts', []) jarcreate = getattr(self, 'jarcreate', 'cf') dir = self.path.find_dir(basedir) if not dir: raise jaropts.append('-C') jaropts.append(dir.abspath(self.env)) jaropts.append('.') out = self.path.find_or_declare(destfile) tsk = self.create_task('jar_create') tsk.set_outputs(out) tsk.inputs = [x for x in dir.find_iter(src=0, bld=1) if x.id != out.id] tsk.env['JAROPTS'] = jaropts tsk.env['JARCREATE'] = jarcreate @feature('javac') @before('apply_core') def apply_java(self): Utils.def_attrs(self, jarname='', jaropts='', classpath='', sourcepath='.', srcdir='.', source_re='**/*.java', jar_mf_attributes={}, jar_mf_classpath=[]) if getattr(self, 'source_root', None): # old stuff self.srcdir = self.source_root nodes_lst = [] if not self.classpath: if not self.env['CLASSPATH']: self.env['CLASSPATH'] = '..' + os.pathsep + '.' 
else: self.env['CLASSPATH'] = self.classpath srcdir_node = self.path.find_dir(self.srcdir) if not srcdir_node: raise Utils.WafError('could not find srcdir %r' % self.srcdir) src_nodes = [x for x in srcdir_node.ant_glob(self.source_re, flat=False)] bld_nodes = [x.change_ext('.class') for x in src_nodes] self.env['OUTDIR'] = [srcdir_node.bldpath(self.env)] tsk = self.create_task('javac') tsk.set_inputs(src_nodes) tsk.set_outputs(bld_nodes) if getattr(self, 'compat', None): tsk.env.append_value('JAVACFLAGS', ['-source', self.compat]) if hasattr(self, 'sourcepath'): fold = [self.path.find_dir(x) for x in self.to_list(self.sourcepath)] names = os.pathsep.join([x.srcpath() for x in fold]) else: names = srcdir_node.srcpath() if names: tsk.env.append_value('JAVACFLAGS', ['-sourcepath', names]) if self.jarname: jtsk = self.create_task('jar_create', bld_nodes, self.path.find_or_declare(self.jarname)) jtsk.set_run_after(tsk) if not self.env.JAROPTS: if self.jaropts: self.env.JAROPTS = self.jaropts else: dirs = '.' 
self.env.JAROPTS = ['-C', ''.join(self.env['OUTDIR']), dirs] Task.simple_task_type('jar_create', '${JAR} ${JARCREATE} ${TGT} ${JAROPTS}', color='GREEN', shell=False) cls = Task.simple_task_type('javac', '${JAVAC} -classpath ${CLASSPATH} -d ${OUTDIR} ${JAVACFLAGS} ${SRC}', shell=False) cls.color = 'BLUE' def post_run_javac(self): """this is for cleaning the folder javac creates single files for inner classes but it is not possible to know which inner classes in advance""" par = {} for x in self.inputs: par[x.parent.id] = x.parent inner = {} for k in par.values(): path = k.abspath(self.env) lst = os.listdir(path) for u in lst: if u.find('$') >= 0: inner_class_node = k.find_or_declare(u) inner[inner_class_node.id] = inner_class_node to_add = set(inner.keys()) - set([x.id for x in self.outputs]) for x in to_add: self.outputs.append(inner[x]) self.cached = True # disable the cache here - inner classes are a problem return Task.Task.post_run(self) cls.post_run = post_run_javac def detect(conf): # If JAVA_PATH is set, we prepend it to the path list java_path = conf.environ['PATH'].split(os.pathsep) v = conf.env if 'JAVA_HOME' in conf.environ: java_path = [os.path.join(conf.environ['JAVA_HOME'], 'bin')] + java_path conf.env['JAVA_HOME'] = [conf.environ['JAVA_HOME']] for x in 'javac java jar'.split(): conf.find_program(x, var=x.upper(), path_list=java_path) conf.env[x.upper()] = conf.cmd_to_list(conf.env[x.upper()]) v['JAVA_EXT'] = ['.java'] if 'CLASSPATH' in conf.environ: v['CLASSPATH'] = conf.environ['CLASSPATH'] if not v['JAR']: conf.fatal('jar is required for making java packages') if not v['JAVAC']: conf.fatal('javac is required for compiling java classes') v['JARCREATE'] = 'cf' # can use cvf @conf def check_java_class(self, classname, with_classpath=None): """Check if the specified java class is installed""" import shutil javatestdir = '.waf-javatest' classpath = javatestdir if self.env['CLASSPATH']: classpath += os.pathsep + self.env['CLASSPATH'] if 
isinstance(with_classpath, str): classpath += os.pathsep + with_classpath shutil.rmtree(javatestdir, True) os.mkdir(javatestdir) java_file = open(os.path.join(javatestdir, 'Test.java'), 'w') java_file.write(class_check_source) java_file.close() # Compile the source Utils.exec_command(self.env['JAVAC'] + [os.path.join(javatestdir, 'Test.java')], shell=False) # Try to run the app cmd = self.env['JAVA'] + ['-cp', classpath, 'Test', classname] self.log.write("%s\n" % str(cmd)) found = Utils.exec_command(cmd, shell=False, log=self.log) self.check_message('Java class %s' % classname, "", not found) shutil.rmtree(javatestdir, True) return found @conf def check_jni_headers(conf): """ Check for jni headers and libraries On success the environment variable xxx_JAVA is added for uselib """ if not conf.env.CC_NAME and not conf.env.CXX_NAME: conf.fatal('load a compiler first (gcc, g++, ..)') if not conf.env.JAVA_HOME: conf.fatal('set JAVA_HOME in the system environment') # jni requires the jvm javaHome = conf.env['JAVA_HOME'][0] b = Build.BuildContext() b.load_dirs(conf.srcdir, conf.blddir) dir = b.root.find_dir(conf.env.JAVA_HOME[0] + '/include') f = dir.ant_glob('**/(jni|jni_md).h', flat=False) incDirs = [x.parent.abspath() for x in f] dir = b.root.find_dir(conf.env.JAVA_HOME[0]) f = dir.ant_glob('**/*jvm.(so|dll)', flat=False) libDirs = [x.parent.abspath() for x in f] or [javaHome] for i, d in enumerate(libDirs): if conf.check(header_name='jni.h', define_name='HAVE_JNI_H', lib='jvm', libpath=d, includes=incDirs, uselib_store='JAVA', uselib='JAVA'): break else: conf.fatal('could not find lib jvm in %r (see config.log)' % libDirs) ntdb-1.0/buildtools/wafadmin/Tools/kde4.py000066400000000000000000000044431224151530700205550ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006 (ita) import os, sys, re import Options, TaskGen, Task, Utils from TaskGen import taskgen, feature, after class msgfmt_taskgen(TaskGen.task_gen): def __init__(self, *k, 
**kw): TaskGen.task_gen.__init__(self, *k, **kw) @feature('msgfmt') def init_msgfmt(self): #langs = '' # for example "foo/fr foo/br" self.default_install_path = '${KDE4_LOCALE_INSTALL_DIR}' @feature('msgfmt') @after('init_msgfmt') def apply_msgfmt(self): for lang in self.to_list(self.langs): node = self.path.find_resource(lang+'.po') task = self.create_task('msgfmt', node, node.change_ext('.mo')) if not self.bld.is_install: continue langname = lang.split('/') langname = langname[-1] task.install_path = self.install_path + os.sep + langname + os.sep + 'LC_MESSAGES' task.filename = getattr(self, 'appname', 'set_your_appname') + '.mo' task.chmod = self.chmod def detect(conf): kdeconfig = conf.find_program('kde4-config') if not kdeconfig: conf.fatal('we need kde4-config') prefix = Utils.cmd_output('%s --prefix' % kdeconfig, silent=True).strip() file = '%s/share/apps/cmake/modules/KDELibsDependencies.cmake' % prefix try: os.stat(file) except OSError: file = '%s/share/kde4/apps/cmake/modules/KDELibsDependencies.cmake' % prefix try: os.stat(file) except OSError: conf.fatal('could not open %s' % file) try: txt = Utils.readf(file) except (OSError, IOError): conf.fatal('could not read %s' % file) txt = txt.replace('\\\n', '\n') fu = re.compile('#(.*)\n') txt = fu.sub('', txt) setregexp = re.compile('([sS][eE][tT]\s*\()\s*([^\s]+)\s+\"([^"]+)\"\)') found = setregexp.findall(txt) for (_, key, val) in found: #print key, val conf.env[key] = val # well well, i could just write an interpreter for cmake files conf.env['LIB_KDECORE']='kdecore' conf.env['LIB_KDEUI'] ='kdeui' conf.env['LIB_KIO'] ='kio' conf.env['LIB_KHTML'] ='khtml' conf.env['LIB_KPARTS'] ='kparts' conf.env['LIBPATH_KDECORE'] = conf.env['KDE4_LIB_INSTALL_DIR'] conf.env['CPPPATH_KDECORE'] = conf.env['KDE4_INCLUDE_INSTALL_DIR'] conf.env.append_value('CPPPATH_KDECORE', conf.env['KDE4_INCLUDE_INSTALL_DIR']+"/KDE") conf.env['MSGFMT'] = conf.find_program('msgfmt') Task.simple_task_type('msgfmt', '${MSGFMT} ${SRC} -o 
${TGT}', color='BLUE', shell=False) ntdb-1.0/buildtools/wafadmin/Tools/libtool.py000066400000000000000000000223021224151530700213640ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Matthias Jahn, 2008, jahn matthias ath freenet punto de # Thomas Nagy, 2008 (ita) import sys, re, os, optparse import TaskGen, Task, Utils, preproc from Logs import error, debug, warn from TaskGen import taskgen, after, before, feature REVISION="0.1.3" """ if you want to use the code here, you must use something like this: obj = obj.create(...) obj.features.append("libtool") obj.vnum = "1.2.3" # optional, but versioned libraries are common """ # fake libtool files fakelibtool_vardeps = ['CXX', 'PREFIX'] def fakelibtool_build(task): # Writes a .la file, used by libtool env = task.env dest = open(task.outputs[0].abspath(env), 'w') sname = task.inputs[0].name fu = dest.write fu("# Generated by ltmain.sh - GNU libtool 1.5.18 - (pwn3d by BKsys II code name WAF)\n") if env['vnum']: nums = env['vnum'].split('.') libname = task.inputs[0].name name3 = libname+'.'+env['vnum'] name2 = libname+'.'+nums[0] name1 = libname fu("dlname='%s'\n" % name2) strn = " ".join([name3, name2, name1]) fu("library_names='%s'\n" % (strn) ) else: fu("dlname='%s'\n" % sname) fu("library_names='%s %s %s'\n" % (sname, sname, sname) ) fu("old_library=''\n") vars = ' '.join(env['libtoolvars']+env['LINKFLAGS']) fu("dependency_libs='%s'\n" % vars) fu("current=0\n") fu("age=0\nrevision=0\ninstalled=yes\nshouldnotlink=no\n") fu("dlopen=''\ndlpreopen=''\n") fu("libdir='%s/lib'\n" % env['PREFIX']) dest.close() return 0 def read_la_file(path): sp = re.compile(r'^([^=]+)=\'(.*)\'$') dc={} file = open(path, "r") for line in file.readlines(): try: #print sp.split(line.strip()) _, left, right, _ = sp.split(line.strip()) dc[left]=right except ValueError: pass file.close() return dc @feature("libtool") @after('apply_link') def apply_link_libtool(self): if self.type != 'program': linktask = self.link_task 
self.latask = self.create_task('fakelibtool', linktask.outputs, linktask.outputs[0].change_ext('.la')) if self.bld.is_install: self.bld.install_files('${PREFIX}/lib', linktask.outputs[0], self.env) @feature("libtool") @before('apply_core') def apply_libtool(self): self.env['vnum']=self.vnum paths=[] libs=[] libtool_files=[] libtool_vars=[] for l in self.env['LINKFLAGS']: if l[:2]=='-L': paths.append(l[2:]) elif l[:2]=='-l': libs.append(l[2:]) for l in libs: for p in paths: dict = read_la_file(p+'/lib'+l+'.la') linkflags2 = dict.get('dependency_libs', '') for v in linkflags2.split(): if v.endswith('.la'): libtool_files.append(v) libtool_vars.append(v) continue self.env.append_unique('LINKFLAGS', v) break self.env['libtoolvars']=libtool_vars while libtool_files: file = libtool_files.pop() dict = read_la_file(file) for v in dict['dependency_libs'].split(): if v[-3:] == '.la': libtool_files.append(v) continue self.env.append_unique('LINKFLAGS', v) Task.task_type_from_func('fakelibtool', vars=fakelibtool_vardeps, func=fakelibtool_build, color='BLUE', after="cc_link cxx_link static_link") class libtool_la_file: def __init__ (self, la_filename): self.__la_filename = la_filename #remove path and .la suffix self.linkname = str(os.path.split(la_filename)[-1])[:-3] if self.linkname.startswith("lib"): self.linkname = self.linkname[3:] # The name that we can dlopen(3). self.dlname = None # Names of this library self.library_names = None # The name of the static archive. self.old_library = None # Libraries that this one depends upon. self.dependency_libs = None # Version information for libIlmImf. self.current = None self.age = None self.revision = None # Is this an already installed library? self.installed = None # Should we warn about portability when linking against -modules? 
self.shouldnotlink = None # Files to dlopen/dlpreopen self.dlopen = None self.dlpreopen = None # Directory that this library needs to be installed in: self.libdir = '/usr/lib' if not self.__parse(): raise ValueError("file %s not found!!" %(la_filename)) def __parse(self): "Retrieve the variables from a file" if not os.path.isfile(self.__la_filename): return 0 la_file=open(self.__la_filename, 'r') for line in la_file: ln = line.strip() if not ln: continue if ln[0]=='#': continue (key, value) = str(ln).split('=', 1) key = key.strip() value = value.strip() if value == "no": value = False elif value == "yes": value = True else: try: value = int(value) except ValueError: value = value.strip("'") setattr(self, key, value) la_file.close() return 1 def get_libs(self): """return linkflags for this lib""" libs = [] if self.dependency_libs: libs = str(self.dependency_libs).strip().split() if libs == None: libs = [] # add la lib and libdir libs.insert(0, "-l%s" % self.linkname.strip()) libs.insert(0, "-L%s" % self.libdir.strip()) return libs def __str__(self): return '''\ dlname = "%(dlname)s" library_names = "%(library_names)s" old_library = "%(old_library)s" dependency_libs = "%(dependency_libs)s" version = %(current)s.%(age)s.%(revision)s installed = "%(installed)s" shouldnotlink = "%(shouldnotlink)s" dlopen = "%(dlopen)s" dlpreopen = "%(dlpreopen)s" libdir = "%(libdir)s"''' % self.__dict__ class libtool_config: def __init__ (self, la_filename): self.__libtool_la_file = libtool_la_file(la_filename) tmp = self.__libtool_la_file self.__version = [int(tmp.current), int(tmp.age), int(tmp.revision)] self.__sub_la_files = [] self.__sub_la_files.append(la_filename) self.__libs = None def __cmp__(self, other): """make it compareable with X.Y.Z versions (Y and Z are optional)""" if not other: return 1 othervers = [int(s) for s in str(other).split(".")] selfvers = self.__version return cmp(selfvers, othervers) def __str__(self): return "\n".join([ str(self.__libtool_la_file), ' 
'.join(self.__libtool_la_file.get_libs()), '* New getlibs:', ' '.join(self.get_libs()) ]) def __get_la_libs(self, la_filename): return libtool_la_file(la_filename).get_libs() def get_libs(self): """return the complete uniqe linkflags that do not contain .la files anymore""" libs_list = list(self.__libtool_la_file.get_libs()) libs_map = {} while len(libs_list) > 0: entry = libs_list.pop(0) if entry: if str(entry).endswith(".la"): ## prevents duplicate .la checks if entry not in self.__sub_la_files: self.__sub_la_files.append(entry) libs_list.extend(self.__get_la_libs(entry)) else: libs_map[entry]=1 self.__libs = libs_map.keys() return self.__libs def get_libs_only_L(self): if not self.__libs: self.get_libs() libs = self.__libs libs = [s for s in libs if str(s).startswith('-L')] return libs def get_libs_only_l(self): if not self.__libs: self.get_libs() libs = self.__libs libs = [s for s in libs if str(s).startswith('-l')] return libs def get_libs_only_other(self): if not self.__libs: self.get_libs() libs = self.__libs libs = [s for s in libs if not(str(s).startswith('-L')or str(s).startswith('-l'))] return libs def useCmdLine(): """parse cmdline args and control build""" usage = '''Usage: %prog [options] PathToFile.la example: %prog --atleast-version=2.0.0 /usr/lib/libIlmImf.la nor: %prog --libs /usr/lib/libamarok.la''' parser = optparse.OptionParser(usage) a = parser.add_option a("--version", dest = "versionNumber", action = "store_true", default = False, help = "output version of libtool-config" ) a("--debug", dest = "debug", action = "store_true", default = False, help = "enable debug" ) a("--libs", dest = "libs", action = "store_true", default = False, help = "output all linker flags" ) a("--libs-only-l", dest = "libs_only_l", action = "store_true", default = False, help = "output -l flags" ) a("--libs-only-L", dest = "libs_only_L", action = "store_true", default = False, help = "output -L flags" ) a("--libs-only-other", dest = "libs_only_other", action = 
"store_true", default = False, help = "output other libs (e.g. -pthread)" ) a("--atleast-version", dest = "atleast_version", default=None, help = "return 0 if the module is at least version ATLEAST_VERSION" ) a("--exact-version", dest = "exact_version", default=None, help = "return 0 if the module is exactly version EXACT_VERSION" ) a("--max-version", dest = "max_version", default=None, help = "return 0 if the module is at no newer than version MAX_VERSION" ) (options, args) = parser.parse_args() if len(args) != 1 and not options.versionNumber: parser.error("incorrect number of arguments") if options.versionNumber: print("libtool-config version %s" % REVISION) return 0 ltf = libtool_config(args[0]) if options.debug: print(ltf) if options.atleast_version: if ltf >= options.atleast_version: return 0 sys.exit(1) if options.exact_version: if ltf == options.exact_version: return 0 sys.exit(1) if options.max_version: if ltf <= options.max_version: return 0 sys.exit(1) def p(x): print(" ".join(x)) if options.libs: p(ltf.get_libs()) elif options.libs_only_l: p(ltf.get_libs_only_l()) elif options.libs_only_L: p(ltf.get_libs_only_L()) elif options.libs_only_other: p(ltf.get_libs_only_other()) return 0 if __name__ == '__main__': useCmdLine() ntdb-1.0/buildtools/wafadmin/Tools/lua.py000066400000000000000000000007511224151530700205050ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Sebastian Schlingmann, 2008 # Thomas Nagy, 2008 (ita) import TaskGen from TaskGen import taskgen, feature from Constants import * TaskGen.declare_chain( name = 'luac', rule = '${LUAC} -s -o ${TGT} ${SRC}', ext_in = '.lua', ext_out = '.luac', reentrant = False, install = 'LUADIR', # env variable ) @feature('lua') def init_lua(self): self.default_chmod = O755 def detect(conf): conf.find_program('luac', var='LUAC', mandatory = True) ntdb-1.0/buildtools/wafadmin/Tools/misc.py000066400000000000000000000276651224151530700206740ustar00rootroot00000000000000#!/usr/bin/env python # 
encoding: utf-8 # Thomas Nagy, 2006 (ita) """ Custom objects: - execute a function everytime - copy a file somewhere else """ import shutil, re, os import TaskGen, Node, Task, Utils, Build, Constants from TaskGen import feature, taskgen, after, before from Logs import debug def copy_func(tsk): "Make a file copy. This might be used to make other kinds of file processing (even calling a compiler is possible)" env = tsk.env infile = tsk.inputs[0].abspath(env) outfile = tsk.outputs[0].abspath(env) try: shutil.copy2(infile, outfile) except (OSError, IOError): return 1 else: if tsk.chmod: os.chmod(outfile, tsk.chmod) return 0 def action_process_file_func(tsk): "Ask the function attached to the task to process it" if not tsk.fun: raise Utils.WafError('task must have a function attached to it for copy_func to work!') return tsk.fun(tsk) class cmd_taskgen(TaskGen.task_gen): def __init__(self, *k, **kw): TaskGen.task_gen.__init__(self, *k, **kw) @feature('cmd') def apply_cmd(self): "call a command everytime" if not self.fun: raise Utils.WafError('cmdobj needs a function!') tsk = Task.TaskBase() tsk.fun = self.fun tsk.env = self.env self.tasks.append(tsk) tsk.install_path = self.install_path class copy_taskgen(TaskGen.task_gen): "By default, make a file copy, if fun is provided, fun will make the copy (or call a compiler, etc)" def __init__(self, *k, **kw): TaskGen.task_gen.__init__(self, *k, **kw) @feature('copy') @before('apply_core') def apply_copy(self): Utils.def_attrs(self, fun=copy_func) self.default_install_path = 0 lst = self.to_list(self.source) self.meths.remove('apply_core') for filename in lst: node = self.path.find_resource(filename) if not node: raise Utils.WafError('cannot find input file %s for processing' % filename) target = self.target if not target or len(lst)>1: target = node.name # TODO the file path may be incorrect newnode = self.path.find_or_declare(target) tsk = self.create_task('copy', node, newnode) tsk.fun = self.fun tsk.chmod = self.chmod 
tsk.install_path = self.install_path if not tsk.env: tsk.debug() raise Utils.WafError('task without an environment') def subst_func(tsk): "Substitutes variables in a .in file" m4_re = re.compile('@(\w+)@', re.M) env = tsk.env infile = tsk.inputs[0].abspath(env) outfile = tsk.outputs[0].abspath(env) code = Utils.readf(infile) # replace all % by %% to prevent errors by % signs in the input file while string formatting code = code.replace('%', '%%') s = m4_re.sub(r'%(\1)s', code) di = tsk.dict or {} if not di: names = m4_re.findall(code) for i in names: di[i] = env.get_flat(i) or env.get_flat(i.upper()) file = open(outfile, 'w') file.write(s % di) file.close() if tsk.chmod: os.chmod(outfile, tsk.chmod) class subst_taskgen(TaskGen.task_gen): def __init__(self, *k, **kw): TaskGen.task_gen.__init__(self, *k, **kw) @feature('subst') @before('apply_core') def apply_subst(self): Utils.def_attrs(self, fun=subst_func) self.default_install_path = 0 lst = self.to_list(self.source) self.meths.remove('apply_core') self.dict = getattr(self, 'dict', {}) for filename in lst: node = self.path.find_resource(filename) if not node: raise Utils.WafError('cannot find input file %s for processing' % filename) if self.target: newnode = self.path.find_or_declare(self.target) else: newnode = node.change_ext('') try: self.dict = self.dict.get_merged_dict() except AttributeError: pass if self.dict and not self.env['DICT_HASH']: self.env = self.env.copy() keys = list(self.dict.keys()) keys.sort() lst = [self.dict[x] for x in keys] self.env['DICT_HASH'] = str(Utils.h_list(lst)) tsk = self.create_task('copy', node, newnode) tsk.fun = self.fun tsk.dict = self.dict tsk.dep_vars = ['DICT_HASH'] tsk.install_path = self.install_path tsk.chmod = self.chmod if not tsk.env: tsk.debug() raise Utils.WafError('task without an environment') #################### ## command-output #### #################### class cmd_arg(object): """command-output arguments for representing files or folders""" def __init__(self, 
name, template='%s'): self.name = name self.template = template self.node = None class input_file(cmd_arg): def find_node(self, base_path): assert isinstance(base_path, Node.Node) self.node = base_path.find_resource(self.name) if self.node is None: raise Utils.WafError("Input file %s not found in " % (self.name, base_path)) def get_path(self, env, absolute): if absolute: return self.template % self.node.abspath(env) else: return self.template % self.node.srcpath(env) class output_file(cmd_arg): def find_node(self, base_path): assert isinstance(base_path, Node.Node) self.node = base_path.find_or_declare(self.name) if self.node is None: raise Utils.WafError("Output file %s not found in " % (self.name, base_path)) def get_path(self, env, absolute): if absolute: return self.template % self.node.abspath(env) else: return self.template % self.node.bldpath(env) class cmd_dir_arg(cmd_arg): def find_node(self, base_path): assert isinstance(base_path, Node.Node) self.node = base_path.find_dir(self.name) if self.node is None: raise Utils.WafError("Directory %s not found in " % (self.name, base_path)) class input_dir(cmd_dir_arg): def get_path(self, dummy_env, dummy_absolute): return self.template % self.node.abspath() class output_dir(cmd_dir_arg): def get_path(self, env, dummy_absolute): return self.template % self.node.abspath(env) class command_output(Task.Task): color = "BLUE" def __init__(self, env, command, command_node, command_args, stdin, stdout, cwd, os_env, stderr): Task.Task.__init__(self, env, normal=1) assert isinstance(command, (str, Node.Node)) self.command = command self.command_args = command_args self.stdin = stdin self.stdout = stdout self.cwd = cwd self.os_env = os_env self.stderr = stderr if command_node is not None: self.dep_nodes = [command_node] self.dep_vars = [] # additional environment variables to look def run(self): task = self #assert len(task.inputs) > 0 def input_path(node, template): if task.cwd is None: return template % 
node.bldpath(task.env) else: return template % node.abspath() def output_path(node, template): fun = node.abspath if task.cwd is None: fun = node.bldpath return template % fun(task.env) if isinstance(task.command, Node.Node): argv = [input_path(task.command, '%s')] else: argv = [task.command] for arg in task.command_args: if isinstance(arg, str): argv.append(arg) else: assert isinstance(arg, cmd_arg) argv.append(arg.get_path(task.env, (task.cwd is not None))) if task.stdin: stdin = open(input_path(task.stdin, '%s')) else: stdin = None if task.stdout: stdout = open(output_path(task.stdout, '%s'), "w") else: stdout = None if task.stderr: stderr = open(output_path(task.stderr, '%s'), "w") else: stderr = None if task.cwd is None: cwd = ('None (actually %r)' % os.getcwd()) else: cwd = repr(task.cwd) debug("command-output: cwd=%s, stdin=%r, stdout=%r, argv=%r" % (cwd, stdin, stdout, argv)) if task.os_env is None: os_env = os.environ else: os_env = task.os_env command = Utils.pproc.Popen(argv, stdin=stdin, stdout=stdout, stderr=stderr, cwd=task.cwd, env=os_env) return command.wait() class cmd_output_taskgen(TaskGen.task_gen): def __init__(self, *k, **kw): TaskGen.task_gen.__init__(self, *k, **kw) @feature('command-output') def init_cmd_output(self): Utils.def_attrs(self, stdin = None, stdout = None, stderr = None, # the command to execute command = None, # whether it is an external command; otherwise it is assumed # to be an executable binary or script that lives in the # source or build tree. command_is_external = False, # extra parameters (argv) to pass to the command (excluding # the command itself) argv = [], # dependencies to other objects -> this is probably not what you want (ita) # values must be 'task_gen' instances (not names!) dependencies = [], # dependencies on env variable contents dep_vars = [], # input files that are implicit, i.e. they are not # stdin, nor are they mentioned explicitly in argv hidden_inputs = [], # output files that are implicit, i.e. 
they are not # stdout, nor are they mentioned explicitly in argv hidden_outputs = [], # change the subprocess to this cwd (must use obj.input_dir() or output_dir() here) cwd = None, # OS environment variables to pass to the subprocess # if None, use the default environment variables unchanged os_env = None) @feature('command-output') @after('init_cmd_output') def apply_cmd_output(self): if self.command is None: raise Utils.WafError("command-output missing command") if self.command_is_external: cmd = self.command cmd_node = None else: cmd_node = self.path.find_resource(self.command) assert cmd_node is not None, ('''Could not find command '%s' in source tree. Hint: if this is an external command, use command_is_external=True''') % (self.command,) cmd = cmd_node if self.cwd is None: cwd = None else: assert isinstance(cwd, CmdDirArg) self.cwd.find_node(self.path) args = [] inputs = [] outputs = [] for arg in self.argv: if isinstance(arg, cmd_arg): arg.find_node(self.path) if isinstance(arg, input_file): inputs.append(arg.node) if isinstance(arg, output_file): outputs.append(arg.node) if self.stdout is None: stdout = None else: assert isinstance(self.stdout, str) stdout = self.path.find_or_declare(self.stdout) if stdout is None: raise Utils.WafError("File %s not found" % (self.stdout,)) outputs.append(stdout) if self.stderr is None: stderr = None else: assert isinstance(self.stderr, str) stderr = self.path.find_or_declare(self.stderr) if stderr is None: raise Utils.WafError("File %s not found" % (self.stderr,)) outputs.append(stderr) if self.stdin is None: stdin = None else: assert isinstance(self.stdin, str) stdin = self.path.find_resource(self.stdin) if stdin is None: raise Utils.WafError("File %s not found" % (self.stdin,)) inputs.append(stdin) for hidden_input in self.to_list(self.hidden_inputs): node = self.path.find_resource(hidden_input) if node is None: raise Utils.WafError("File %s not found in dir %s" % (hidden_input, self.path)) inputs.append(node) for 
hidden_output in self.to_list(self.hidden_outputs): node = self.path.find_or_declare(hidden_output) if node is None: raise Utils.WafError("File %s not found in dir %s" % (hidden_output, self.path)) outputs.append(node) if not (inputs or getattr(self, 'no_inputs', None)): raise Utils.WafError('command-output objects must have at least one input file or give self.no_inputs') if not (outputs or getattr(self, 'no_outputs', None)): raise Utils.WafError('command-output objects must have at least one output file or give self.no_outputs') task = command_output(self.env, cmd, cmd_node, self.argv, stdin, stdout, cwd, self.os_env, stderr) Utils.copy_attrs(self, task, 'before after ext_in ext_out', only_if_set=True) self.tasks.append(task) task.inputs = inputs task.outputs = outputs task.dep_vars = self.to_list(self.dep_vars) for dep in self.dependencies: assert dep is not self dep.post() for dep_task in dep.tasks: task.set_run_after(dep_task) if not task.inputs: # the case for svnversion, always run, and update the output nodes task.runnable_status = type(Task.TaskBase.run)(runnable_status, task, task.__class__) # always run task.post_run = type(Task.TaskBase.run)(post_run, task, task.__class__) # TODO the case with no outputs? 
def post_run(self): for x in self.outputs: h = Utils.h_file(x.abspath(self.env)) self.generator.bld.node_sigs[self.env.variant()][x.id] = h def runnable_status(self): return Constants.RUN_ME Task.task_type_from_func('copy', vars=[], func=action_process_file_func) TaskGen.task_gen.classes['command-output'] = cmd_output_taskgen ntdb-1.0/buildtools/wafadmin/Tools/msvc.py000066400000000000000000000640051224151530700206760ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2006 (dv) # Tamas Pal, 2007 (folti) # Nicolas Mercier, 2009 # Microsoft Visual C++/Intel C++ compiler support - beta, needs more testing # usage: # # conf.env['MSVC_VERSIONS'] = ['msvc 9.0', 'msvc 8.0', 'wsdk 7.0', 'intel 11', 'PocketPC 9.0', 'Smartphone 8.0'] # conf.env['MSVC_TARGETS'] = ['x64'] # conf.check_tool('msvc') # OR conf.check_tool('msvc', funs='no_autodetect') # conf.check_lib_msvc('gdi32') # conf.check_libs_msvc('kernel32 user32', mandatory=true) # ... # obj.uselib = 'KERNEL32 USER32 GDI32' # # platforms and targets will be tested in the order they appear; # the first good configuration will be used # supported platforms : # ia64, x64, x86, x86_amd64, x86_ia64 # compilers supported : # msvc => Visual Studio, versions 7.1 (2003), 8,0 (2005), 9.0 (2008) # wsdk => Windows SDK, versions 6.0, 6.1, 7.0 # icl => Intel compiler, versions 9,10,11 # Smartphone => Compiler/SDK for Smartphone devices (armv4/v4i) # PocketPC => Compiler/SDK for PocketPC devices (armv4/v4i) import os, sys, re, string, optparse import Utils, TaskGen, Runner, Configure, Task, Options from Logs import debug, info, warn, error from TaskGen import after, before, feature from Configure import conftest, conf import ccroot, cc, cxx, ar, winres from libtool import read_la_file try: import _winreg except: import winreg as _winreg pproc = Utils.pproc # importlibs provided by MSVC/Platform SDK. Do NOT search them.... 
g_msvc_systemlibs = """ aclui activeds ad1 adptif adsiid advapi32 asycfilt authz bhsupp bits bufferoverflowu cabinet cap certadm certidl ciuuid clusapi comctl32 comdlg32 comsupp comsuppd comsuppw comsuppwd comsvcs credui crypt32 cryptnet cryptui d3d8thk daouuid dbgeng dbghelp dciman32 ddao35 ddao35d ddao35u ddao35ud delayimp dhcpcsvc dhcpsapi dlcapi dnsapi dsprop dsuiext dtchelp faultrep fcachdll fci fdi framedyd framedyn gdi32 gdiplus glauxglu32 gpedit gpmuuid gtrts32w gtrtst32hlink htmlhelp httpapi icm32 icmui imagehlp imm32 iphlpapi iprop kernel32 ksguid ksproxy ksuser libcmt libcmtd libcpmt libcpmtd loadperf lz32 mapi mapi32 mgmtapi minidump mmc mobsync mpr mprapi mqoa mqrt msacm32 mscms mscoree msdasc msimg32 msrating mstask msvcmrt msvcurt msvcurtd mswsock msxml2 mtx mtxdm netapi32 nmapinmsupp npptools ntdsapi ntdsbcli ntmsapi ntquery odbc32 odbcbcp odbccp32 oldnames ole32 oleacc oleaut32 oledb oledlgolepro32 opends60 opengl32 osptk parser pdh penter pgobootrun pgort powrprof psapi ptrustm ptrustmd ptrustu ptrustud qosname rasapi32 rasdlg rassapi resutils riched20 rpcndr rpcns4 rpcrt4 rtm rtutils runtmchk scarddlg scrnsave scrnsavw secur32 sensapi setupapi sfc shell32 shfolder shlwapi sisbkup snmpapi sporder srclient sti strsafe svcguid tapi32 thunk32 traffic unicows url urlmon user32 userenv usp10 uuid uxtheme vcomp vcompd vdmdbg version vfw32 wbemuuid webpost wiaguid wininet winmm winscard winspool winstrm wintrust wldap32 wmiutils wow32 ws2_32 wsnmp32 wsock32 wst wtsapi32 xaswitch xolehlp """.split() all_msvc_platforms = [ ('x64', 'amd64'), ('x86', 'x86'), ('ia64', 'ia64'), ('x86_amd64', 'amd64'), ('x86_ia64', 'ia64') ] all_wince_platforms = [ ('armv4', 'arm'), ('armv4i', 'arm'), ('mipsii', 'mips'), ('mipsii_fp', 'mips'), ('mipsiv', 'mips'), ('mipsiv_fp', 'mips'), ('sh4', 'sh'), ('x86', 'cex86') ] all_icl_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')] def setup_msvc(conf, versions): platforms = 
Utils.to_list(conf.env['MSVC_TARGETS']) or [i for i,j in all_msvc_platforms+all_icl_platforms+all_wince_platforms] desired_versions = conf.env['MSVC_VERSIONS'] or [v for v,_ in versions][::-1] versiondict = dict(versions) for version in desired_versions: try: targets = dict(versiondict [version]) for target in platforms: try: arch,(p1,p2,p3) = targets[target] compiler,revision = version.split() return compiler,revision,p1,p2,p3 except KeyError: continue except KeyError: continue conf.fatal('msvc: Impossible to find a valid architecture for building (in setup_msvc)') @conf def get_msvc_version(conf, compiler, version, target, vcvars): debug('msvc: get_msvc_version: %r %r %r', compiler, version, target) batfile = os.path.join(conf.blddir, 'waf-print-msvc.bat') f = open(batfile, 'w') f.write("""@echo off set INCLUDE= set LIB= call "%s" %s echo PATH=%%PATH%% echo INCLUDE=%%INCLUDE%% echo LIB=%%LIB%% """ % (vcvars,target)) f.close() sout = Utils.cmd_output(['cmd', '/E:on', '/V:on', '/C', batfile]) lines = sout.splitlines() for x in ('Setting environment', 'Setting SDK environment', 'Intel(R) C++ Compiler'): if lines[0].find(x) != -1: break else: debug('msvc: get_msvc_version: %r %r %r -> not found', compiler, version, target) conf.fatal('msvc: Impossible to find a valid architecture for building (in get_msvc_version)') for line in lines[1:]: if line.startswith('PATH='): path = line[5:] MSVC_PATH = path.split(';') elif line.startswith('INCLUDE='): MSVC_INCDIR = [i for i in line[8:].split(';') if i] elif line.startswith('LIB='): MSVC_LIBDIR = [i for i in line[4:].split(';') if i] # Check if the compiler is usable at all. # The detection may return 64-bit versions even on 32-bit systems, and these would fail to run. env = {} env.update(os.environ) env.update(PATH = path) compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) cxx = conf.find_program(compiler_name, path_list=MSVC_PATH) # delete CL if exists. 
because it could contain parameters wich can change cl's behaviour rather catastrophically. if env.has_key('CL'): del(env['CL']) try: p = pproc.Popen([cxx, '/help'], env=env, stdout=pproc.PIPE, stderr=pproc.PIPE) out, err = p.communicate() if p.returncode != 0: raise Exception('return code: %r: %r' % (p.returncode, err)) except Exception, e: debug('msvc: get_msvc_version: %r %r %r -> failure', compiler, version, target) debug(str(e)) conf.fatal('msvc: cannot run the compiler (in get_msvc_version)') else: debug('msvc: get_msvc_version: %r %r %r -> OK', compiler, version, target) return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR) @conf def gather_wsdk_versions(conf, versions): version_pattern = re.compile('^v..?.?\...?.?') try: all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Microsoft SDKs\\Windows') except WindowsError: try: all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows') except WindowsError: return index = 0 while 1: try: version = _winreg.EnumKey(all_versions, index) except WindowsError: break index = index + 1 if not version_pattern.match(version): continue try: msvc_version = _winreg.OpenKey(all_versions, version) path,type = _winreg.QueryValueEx(msvc_version,'InstallationFolder') except WindowsError: continue if os.path.isfile(os.path.join(path, 'bin', 'SetEnv.cmd')): targets = [] for target,arch in all_msvc_platforms: try: targets.append((target, (arch, conf.get_msvc_version('wsdk', version, '/'+target, os.path.join(path, 'bin', 'SetEnv.cmd'))))) except Configure.ConfigurationError: pass versions.append(('wsdk ' + version[1:], targets)) @conf def gather_msvc_versions(conf, versions): # checks SmartPhones SDKs try: ce_sdk = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Windows CE Tools\\SDKs') except WindowsError: try: ce_sdk = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows CE Tools\\SDKs') except 
WindowsError: ce_sdk = '' if ce_sdk: supported_wince_platforms = [] ce_index = 0 while 1: try: sdk_device = _winreg.EnumKey(ce_sdk, ce_index) except WindowsError: break ce_index = ce_index + 1 sdk = _winreg.OpenKey(ce_sdk, sdk_device) path,type = _winreg.QueryValueEx(sdk, 'SDKRootDir') path=str(path) path,device = os.path.split(path) if not device: path,device = os.path.split(path) for arch,compiler in all_wince_platforms: platforms = [] if os.path.isdir(os.path.join(path, device, 'Lib', arch)): platforms.append((arch, compiler, os.path.join(path, device, 'Include', arch), os.path.join(path, device, 'Lib', arch))) if platforms: supported_wince_platforms.append((device, platforms)) # checks MSVC version_pattern = re.compile('^..?\...?') for vcver,vcvar in [('VCExpress','exp'), ('VisualStudio','')]: try: all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\'+vcver) except WindowsError: try: all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\'+vcver) except WindowsError: continue index = 0 while 1: try: version = _winreg.EnumKey(all_versions, index) except WindowsError: break index = index + 1 if not version_pattern.match(version): continue try: msvc_version = _winreg.OpenKey(all_versions, version + "\\Setup\\VS") path,type = _winreg.QueryValueEx(msvc_version, 'ProductDir') path=str(path) targets = [] if ce_sdk: for device,platforms in supported_wince_platforms: cetargets = [] for platform,compiler,include,lib in platforms: winCEpath = os.path.join(path, 'VC', 'ce') if os.path.isdir(winCEpath): common_bindirs,_1,_2 = conf.get_msvc_version('msvc', version, 'x86', os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat')) if os.path.isdir(os.path.join(winCEpath, 'lib', platform)): bindirs = [os.path.join(winCEpath, 'bin', compiler), os.path.join(winCEpath, 'bin', 'x86_'+compiler)] + common_bindirs incdirs = [include, os.path.join(winCEpath, 'include'), os.path.join(winCEpath, 'atlmfc', 'include')] 
libdirs = [lib, os.path.join(winCEpath, 'lib', platform), os.path.join(winCEpath, 'atlmfc', 'lib', platform)] cetargets.append((platform, (platform, (bindirs,incdirs,libdirs)))) versions.append((device+' '+version, cetargets)) if os.path.isfile(os.path.join(path, 'VC', 'vcvarsall.bat')): for target,realtarget in all_msvc_platforms[::-1]: try: targets.append((target, (realtarget, conf.get_msvc_version('msvc', version, target, os.path.join(path, 'VC', 'vcvarsall.bat'))))) except: pass elif os.path.isfile(os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat')): try: targets.append(('x86', ('x86', conf.get_msvc_version('msvc', version, 'x86', os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat'))))) except Configure.ConfigurationError: pass versions.append(('msvc '+version, targets)) except WindowsError: continue @conf def gather_icl_versions(conf, versions): version_pattern = re.compile('^...?.?\....?.?') try: all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\C++') except WindowsError: try: all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\C++') except WindowsError: return index = 0 while 1: try: version = _winreg.EnumKey(all_versions, index) except WindowsError: break index = index + 1 if not version_pattern.match(version): continue targets = [] for target,arch in all_icl_platforms: try: icl_version = _winreg.OpenKey(all_versions, version+'\\'+target) path,type = _winreg.QueryValueEx(icl_version,'ProductDir') if os.path.isfile(os.path.join(path, 'bin', 'iclvars.bat')): try: targets.append((target, (arch, conf.get_msvc_version('intel', version, target, os.path.join(path, 'bin', 'iclvars.bat'))))) except Configure.ConfigurationError: pass except WindowsError: continue major = version[0:2] versions.append(('intel ' + major, targets)) @conf def get_msvc_versions(conf): if not conf.env.MSVC_INSTALLED_VERSIONS: lst = [] conf.gather_msvc_versions(lst) conf.gather_wsdk_versions(lst) 
conf.gather_icl_versions(lst) conf.env.MSVC_INSTALLED_VERSIONS = lst return conf.env.MSVC_INSTALLED_VERSIONS @conf def print_all_msvc_detected(conf): for version,targets in conf.env['MSVC_INSTALLED_VERSIONS']: info(version) for target,l in targets: info("\t"+target) def detect_msvc(conf): versions = get_msvc_versions(conf) return setup_msvc(conf, versions) @conf def find_lt_names_msvc(self, libname, is_static=False): """ Win32/MSVC specific code to glean out information from libtool la files. this function is not attached to the task_gen class """ lt_names=[ 'lib%s.la' % libname, '%s.la' % libname, ] for path in self.env['LIBPATH']: for la in lt_names: laf=os.path.join(path,la) dll=None if os.path.exists(laf): ltdict=read_la_file(laf) lt_libdir=None if ltdict.get('libdir', ''): lt_libdir = ltdict['libdir'] if not is_static and ltdict.get('library_names', ''): dllnames=ltdict['library_names'].split() dll=dllnames[0].lower() dll=re.sub('\.dll$', '', dll) return (lt_libdir, dll, False) elif ltdict.get('old_library', ''): olib=ltdict['old_library'] if os.path.exists(os.path.join(path,olib)): return (path, olib, True) elif lt_libdir != '' and os.path.exists(os.path.join(lt_libdir,olib)): return (lt_libdir, olib, True) else: return (None, olib, True) else: raise Utils.WafError('invalid libtool object file: %s' % laf) return (None, None, None) @conf def libname_msvc(self, libname, is_static=False, mandatory=False): lib = libname.lower() lib = re.sub('\.lib$','',lib) if lib in g_msvc_systemlibs: return lib lib=re.sub('^lib','',lib) if lib == 'm': return None (lt_path, lt_libname, lt_static) = self.find_lt_names_msvc(lib, is_static) if lt_path != None and lt_libname != None: if lt_static == True: # file existance check has been made by find_lt_names return os.path.join(lt_path,lt_libname) if lt_path != None: _libpaths=[lt_path] + self.env['LIBPATH'] else: _libpaths=self.env['LIBPATH'] static_libs=[ 'lib%ss.lib' % lib, 'lib%s.lib' % lib, '%ss.lib' % lib, '%s.lib' %lib, ] 
dynamic_libs=[ 'lib%s.dll.lib' % lib, 'lib%s.dll.a' % lib, '%s.dll.lib' % lib, '%s.dll.a' % lib, 'lib%s_d.lib' % lib, '%s_d.lib' % lib, '%s.lib' %lib, ] libnames=static_libs if not is_static: libnames=dynamic_libs + static_libs for path in _libpaths: for libn in libnames: if os.path.exists(os.path.join(path, libn)): debug('msvc: lib found: %s', os.path.join(path,libn)) return re.sub('\.lib$', '',libn) #if no lib can be found, just return the libname as msvc expects it if mandatory: self.fatal("The library %r could not be found" % libname) return re.sub('\.lib$', '', libname) @conf def check_lib_msvc(self, libname, is_static=False, uselib_store=None, mandatory=False): "This is the api to use" libn = self.libname_msvc(libname, is_static, mandatory) if not uselib_store: uselib_store = libname.upper() # Note: ideally we should be able to place the lib in the right env var, either STATICLIB or LIB, # but we don't distinguish static libs from shared libs. # This is ok since msvc doesn't have any special linker flag to select static libs (no env['STATICLIB_MARKER']) if False and is_static: # disabled self.env['STATICLIB_' + uselib_store] = [libn] else: self.env['LIB_' + uselib_store] = [libn] @conf def check_libs_msvc(self, libnames, is_static=False, mandatory=False): for libname in Utils.to_list(libnames): self.check_lib_msvc(libname, is_static, mandatory=mandatory) @conftest def no_autodetect(conf): conf.eval_rules(detect.replace('autodetect', '')) detect = ''' autodetect find_msvc msvc_common_flags cc_load_tools cxx_load_tools cc_add_flags cxx_add_flags link_add_flags ''' @conftest def autodetect(conf): v = conf.env compiler, version, path, includes, libdirs = detect_msvc(conf) v['PATH'] = path v['CPPPATH'] = includes v['LIBPATH'] = libdirs v['MSVC_COMPILER'] = compiler def _get_prog_names(conf, compiler): if compiler=='intel': compiler_name = 'ICL' linker_name = 'XILINK' lib_name = 'XILIB' else: # assumes CL.exe compiler_name = 'CL' linker_name = 'LINK' lib_name = 
'LIB' return compiler_name, linker_name, lib_name @conftest def find_msvc(conf): # due to path format limitations, limit operation only to native Win32. Yeah it sucks. if sys.platform != 'win32': conf.fatal('MSVC module only works under native Win32 Python! cygwin is not supported yet') v = conf.env compiler, version, path, includes, libdirs = detect_msvc(conf) compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) has_msvc_manifest = (compiler == 'msvc' and float(version) >= 8) or (compiler == 'wsdk' and float(version) >= 6) or (compiler == 'intel' and float(version) >= 11) # compiler cxx = None if v.CXX: cxx = v.CXX elif 'CXX' in conf.environ: cxx = conf.environ['CXX'] if not cxx: cxx = conf.find_program(compiler_name, var='CXX', path_list=path, mandatory=True) cxx = conf.cmd_to_list(cxx) # before setting anything, check if the compiler is really msvc env = dict(conf.environ) env.update(PATH = ';'.join(path)) if not Utils.cmd_output([cxx, '/nologo', '/?'], silent=True, env=env): conf.fatal('the msvc compiler could not be identified') link = v.LINK_CXX if not link: link = conf.find_program(linker_name, path_list=path, mandatory=True) ar = v.AR if not ar: ar = conf.find_program(lib_name, path_list=path, mandatory=True) # manifest tool. Not required for VS 2003 and below. Must have for VS 2005 and later mt = v.MT if has_msvc_manifest: mt = conf.find_program('MT', path_list=path, mandatory=True) # no more possibility of failure means the data state will be consistent # we may store the data safely now v.MSVC_MANIFEST = has_msvc_manifest v.PATH = path v.CPPPATH = includes v.LIBPATH = libdirs # c/c++ compiler v.CC = v.CXX = cxx v.CC_NAME = v.CXX_NAME = 'msvc' v.LINK = v.LINK_CXX = link if not v.LINK_CC: v.LINK_CC = v.LINK_CXX v.AR = ar v.MT = mt v.MTFLAGS = v.ARFLAGS = ['/NOLOGO'] conf.check_tool('winres') if not conf.env.WINRC: warn('Resource compiler not found. 
Compiling resource file is disabled') # environment flags try: v.prepend_value('CPPPATH', conf.environ['INCLUDE']) except KeyError: pass try: v.prepend_value('LIBPATH', conf.environ['LIB']) except KeyError: pass @conftest def msvc_common_flags(conf): v = conf.env v['CPPFLAGS'] = ['/W3', '/nologo'] v['CCDEFINES_ST'] = '/D%s' v['CXXDEFINES_ST'] = '/D%s' # TODO just use _WIN32, which defined by the compiler itself! v['CCDEFINES'] = ['WIN32'] # avoid using this, any compiler predefines the _WIN32 marcro anyway v['CXXDEFINES'] = ['WIN32'] # avoid using this, any compiler predefines the _WIN32 marcro anyway v['_CCINCFLAGS'] = [] v['_CCDEFFLAGS'] = [] v['_CXXINCFLAGS'] = [] v['_CXXDEFFLAGS'] = [] v['CC_SRC_F'] = '' v['CC_TGT_F'] = ['/c', '/Fo'] v['CXX_SRC_F'] = '' v['CXX_TGT_F'] = ['/c', '/Fo'] v['CPPPATH_ST'] = '/I%s' # template for adding include paths v['AR_TGT_F'] = v['CCLNK_TGT_F'] = v['CXXLNK_TGT_F'] = '/OUT:' # Subsystem specific flags v['CPPFLAGS_CONSOLE'] = ['/SUBSYSTEM:CONSOLE'] v['CPPFLAGS_NATIVE'] = ['/SUBSYSTEM:NATIVE'] v['CPPFLAGS_POSIX'] = ['/SUBSYSTEM:POSIX'] v['CPPFLAGS_WINDOWS'] = ['/SUBSYSTEM:WINDOWS'] v['CPPFLAGS_WINDOWSCE'] = ['/SUBSYSTEM:WINDOWSCE'] # CRT specific flags v['CPPFLAGS_CRT_MULTITHREADED'] = ['/MT'] v['CPPFLAGS_CRT_MULTITHREADED_DLL'] = ['/MD'] # TODO these are defined by the compiler itself! v['CPPDEFINES_CRT_MULTITHREADED'] = ['_MT'] # this is defined by the compiler itself! v['CPPDEFINES_CRT_MULTITHREADED_DLL'] = ['_MT', '_DLL'] # these are defined by the compiler itself! v['CPPFLAGS_CRT_MULTITHREADED_DBG'] = ['/MTd'] v['CPPFLAGS_CRT_MULTITHREADED_DLL_DBG'] = ['/MDd'] # TODO these are defined by the compiler itself! v['CPPDEFINES_CRT_MULTITHREADED_DBG'] = ['_DEBUG', '_MT'] # these are defined by the compiler itself! v['CPPDEFINES_CRT_MULTITHREADED_DLL_DBG'] = ['_DEBUG', '_MT', '_DLL'] # these are defined by the compiler itself! 
# compiler debug levels v['CCFLAGS'] = ['/TC'] v['CCFLAGS_OPTIMIZED'] = ['/O2', '/DNDEBUG'] v['CCFLAGS_RELEASE'] = ['/O2', '/DNDEBUG'] v['CCFLAGS_DEBUG'] = ['/Od', '/RTC1', '/ZI'] v['CCFLAGS_ULTRADEBUG'] = ['/Od', '/RTC1', '/ZI'] v['CXXFLAGS'] = ['/TP', '/EHsc'] v['CXXFLAGS_OPTIMIZED'] = ['/O2', '/DNDEBUG'] v['CXXFLAGS_RELEASE'] = ['/O2', '/DNDEBUG'] v['CXXFLAGS_DEBUG'] = ['/Od', '/RTC1', '/ZI'] v['CXXFLAGS_ULTRADEBUG'] = ['/Od', '/RTC1', '/ZI'] # linker v['LIB'] = [] v['LIB_ST'] = '%s.lib' # template for adding libs v['LIBPATH_ST'] = '/LIBPATH:%s' # template for adding libpaths v['STATICLIB_ST'] = 'lib%s.lib' # Note: to be able to distinguish between a static lib and a dll import lib, it's a good pratice to name the static lib 'lib%s.lib' and the dll import lib '%s.lib' v['STATICLIBPATH_ST'] = '/LIBPATH:%s' v['LINKFLAGS'] = ['/NOLOGO'] if v['MSVC_MANIFEST']: v.append_value('LINKFLAGS', '/MANIFEST') v['LINKFLAGS_DEBUG'] = ['/DEBUG'] v['LINKFLAGS_ULTRADEBUG'] = ['/DEBUG'] # shared library v['shlib_CCFLAGS'] = [''] v['shlib_CXXFLAGS'] = [''] v['shlib_LINKFLAGS']= ['/DLL'] v['shlib_PATTERN'] = '%s.dll' v['implib_PATTERN'] = '%s.lib' v['IMPLIB_ST'] = '/IMPLIB:%s' # static library v['staticlib_LINKFLAGS'] = [''] v['staticlib_PATTERN'] = 'lib%s.lib' # Note: to be able to distinguish between a static lib and a dll import lib, it's a good pratice to name the static lib 'lib%s.lib' and the dll import lib '%s.lib' # program v['program_PATTERN'] = '%s.exe' ####################################################################################################### ##### conf above, build below @after('apply_link') @feature('cc', 'cxx') def apply_flags_msvc(self): if self.env.CC_NAME != 'msvc' or not self.link_task: return subsystem = getattr(self, 'subsystem', '') if subsystem: subsystem = '/subsystem:%s' % subsystem flags = 'cstaticlib' in self.features and 'ARFLAGS' or 'LINKFLAGS' self.env.append_value(flags, subsystem) if getattr(self, 'link_task', None) and not 'cstaticlib' in 
self.features: for f in self.env.LINKFLAGS: d = f.lower() if d[1:] == 'debug': pdbnode = self.link_task.outputs[0].change_ext('.pdb') pdbfile = pdbnode.bldpath(self.env) self.link_task.outputs.append(pdbnode) self.bld.install_files(self.install_path, [pdbnode], env=self.env) break @feature('cprogram', 'cshlib', 'cstaticlib') @after('apply_lib_vars') @before('apply_obj_vars') def apply_obj_vars_msvc(self): if self.env['CC_NAME'] != 'msvc': return try: self.meths.remove('apply_obj_vars') except ValueError: pass libpaths = getattr(self, 'libpaths', []) if not libpaths: self.libpaths = libpaths env = self.env app = env.append_unique cpppath_st = env['CPPPATH_ST'] lib_st = env['LIB_ST'] staticlib_st = env['STATICLIB_ST'] libpath_st = env['LIBPATH_ST'] staticlibpath_st = env['STATICLIBPATH_ST'] for i in env['LIBPATH']: app('LINKFLAGS', libpath_st % i) if not libpaths.count(i): libpaths.append(i) for i in env['LIBPATH']: app('LINKFLAGS', staticlibpath_st % i) if not libpaths.count(i): libpaths.append(i) # i doubt that anyone will make a fully static binary anyway if not env['FULLSTATIC']: if env['STATICLIB'] or env['LIB']: app('LINKFLAGS', env['SHLIB_MARKER']) # TODO does SHLIB_MARKER work? for i in env['STATICLIB']: app('LINKFLAGS', staticlib_st % i) for i in env['LIB']: app('LINKFLAGS', lib_st % i) # split the manifest file processing from the link task, like for the rc processing @feature('cprogram', 'cshlib') @after('apply_link') def apply_manifest(self): """Special linker for MSVC with support for embedding manifests into DLL's and executables compiled by Visual Studio 2005 or probably later. Without the manifest file, the binaries are unusable. 
See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx""" if self.env.CC_NAME == 'msvc' and self.env.MSVC_MANIFEST: out_node = self.link_task.outputs[0] man_node = out_node.parent.find_or_declare(out_node.name + '.manifest') self.link_task.outputs.append(man_node) self.link_task.do_manifest = True def exec_mf(self): env = self.env mtool = env['MT'] if not mtool: return 0 self.do_manifest = False outfile = self.outputs[0].bldpath(env) manifest = None for out_node in self.outputs: if out_node.name.endswith('.manifest'): manifest = out_node.bldpath(env) break if manifest is None: # Should never get here. If we do, it means the manifest file was # never added to the outputs list, thus we don't have a manifest file # to embed, so we just return. return 0 # embedding mode. Different for EXE's and DLL's. # see: http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx mode = '' if 'cprogram' in self.generator.features: mode = '1' elif 'cshlib' in self.generator.features: mode = '2' debug('msvc: embedding manifest') #flags = ' '.join(env['MTFLAGS'] or []) lst = [] lst.extend([env['MT']]) lst.extend(Utils.to_list(env['MTFLAGS'])) lst.extend(Utils.to_list("-manifest")) lst.extend(Utils.to_list(manifest)) lst.extend(Utils.to_list("-outputresource:%s;%s" % (outfile, mode))) #cmd='%s %s -manifest "%s" -outputresource:"%s";#%s' % (mtool, flags, # manifest, outfile, mode) lst = [lst] return self.exec_command(*lst) ########## stupid evil command modification: concatenate the tokens /Fx, /doc, and /x: with the next token def exec_command_msvc(self, *k, **kw): "instead of quoting all the paths and keep using the shell, we can just join the options msvc is interested in" if self.env['CC_NAME'] == 'msvc': if isinstance(k[0], list): lst = [] carry = '' for a in k[0]: if len(a) == 3 and a.startswith('/F') or a == '/doc' or a[-1] == ':': carry = a else: lst.append(carry + a) carry = '' k = [lst] env = dict(os.environ) env.update(PATH = ';'.join(self.env['PATH'])) 
kw['env'] = env ret = self.generator.bld.exec_command(*k, **kw) if ret: return ret if getattr(self, 'do_manifest', None): ret = exec_mf(self) return ret for k in 'cc cxx winrc cc_link cxx_link static_link qxx'.split(): cls = Task.TaskBase.classes.get(k, None) if cls: cls.exec_command = exec_command_msvc ntdb-1.0/buildtools/wafadmin/Tools/nasm.py000066400000000000000000000024741224151530700206660ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2008 """ Nasm processing """ import os import TaskGen, Task, Utils from TaskGen import taskgen, before, extension nasm_str = '${NASM} ${NASM_FLAGS} ${NASM_INCLUDES} ${SRC} -o ${TGT}' EXT_NASM = ['.s', '.S', '.asm', '.ASM', '.spp', '.SPP'] @before('apply_link') def apply_nasm_vars(self): # flags if hasattr(self, 'nasm_flags'): for flag in self.to_list(self.nasm_flags): self.env.append_value('NASM_FLAGS', flag) # includes - well, if we suppose it works with c processing if hasattr(self, 'includes'): for inc in self.to_list(self.includes): node = self.path.find_dir(inc) if not node: raise Utils.WafError('cannot find the dir' + inc) self.env.append_value('NASM_INCLUDES', '-I%s' % node.srcpath(self.env)) self.env.append_value('NASM_INCLUDES', '-I%s' % node.bldpath(self.env)) @extension(EXT_NASM) def nasm_file(self, node): try: obj_ext = self.obj_ext except AttributeError: obj_ext = '_%d.o' % self.idx task = self.create_task('nasm', node, node.change_ext(obj_ext)) self.compiled_tasks.append(task) self.meths.append('apply_nasm_vars') # create our action here Task.simple_task_type('nasm', nasm_str, color='BLUE', ext_out='.o', shell=False) def detect(conf): nasm = conf.find_program(['nasm', 'yasm'], var='NASM', mandatory=True) ntdb-1.0/buildtools/wafadmin/Tools/ocaml.py000066400000000000000000000216051224151530700210200ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006 (ita) "ocaml support" import os, re import TaskGen, Utils, Task, Build from Logs import error 
from TaskGen import taskgen, feature, before, after, extension EXT_MLL = ['.mll'] EXT_MLY = ['.mly'] EXT_MLI = ['.mli'] EXT_MLC = ['.c'] EXT_ML = ['.ml'] open_re = re.compile('^\s*open\s+([a-zA-Z]+)(;;){0,1}$', re.M) foo = re.compile(r"""(\(\*)|(\*\))|("(\\.|[^"\\])*"|'(\\.|[^'\\])*'|.[^()*"'\\]*)""", re.M) def filter_comments(txt): meh = [0] def repl(m): if m.group(1): meh[0] += 1 elif m.group(2): meh[0] -= 1 elif not meh[0]: return m.group(0) return '' return foo.sub(repl, txt) def scan(self): node = self.inputs[0] code = filter_comments(node.read(self.env)) global open_re names = [] import_iterator = open_re.finditer(code) if import_iterator: for import_match in import_iterator: names.append(import_match.group(1)) found_lst = [] raw_lst = [] for name in names: nd = None for x in self.incpaths: nd = x.find_resource(name.lower()+'.ml') if not nd: nd = x.find_resource(name+'.ml') if nd: found_lst.append(nd) break else: raw_lst.append(name) return (found_lst, raw_lst) native_lst=['native', 'all', 'c_object'] bytecode_lst=['bytecode', 'all'] class ocaml_taskgen(TaskGen.task_gen): def __init__(self, *k, **kw): TaskGen.task_gen.__init__(self, *k, **kw) @feature('ocaml') def init_ml(self): Utils.def_attrs(self, type = 'all', incpaths_lst = [], bld_incpaths_lst = [], mlltasks = [], mlytasks = [], mlitasks = [], native_tasks = [], bytecode_tasks = [], linktasks = [], bytecode_env = None, native_env = None, compiled_tasks = [], includes = '', uselib = '', are_deps_set = 0) @feature('ocaml') @after('init_ml') def init_envs_ml(self): self.islibrary = getattr(self, 'islibrary', False) global native_lst, bytecode_lst self.native_env = None if self.type in native_lst: self.native_env = self.env.copy() if self.islibrary: self.native_env['OCALINKFLAGS'] = '-a' self.bytecode_env = None if self.type in bytecode_lst: self.bytecode_env = self.env.copy() if self.islibrary: self.bytecode_env['OCALINKFLAGS'] = '-a' if self.type == 'c_object': 
self.native_env.append_unique('OCALINKFLAGS_OPT', '-output-obj') @feature('ocaml') @before('apply_vars_ml') @after('init_envs_ml') def apply_incpaths_ml(self): inc_lst = self.includes.split() lst = self.incpaths_lst for dir in inc_lst: node = self.path.find_dir(dir) if not node: error("node not found: " + str(dir)) continue self.bld.rescan(node) if not node in lst: lst.append(node) self.bld_incpaths_lst.append(node) # now the nodes are added to self.incpaths_lst @feature('ocaml') @before('apply_core') def apply_vars_ml(self): for i in self.incpaths_lst: if self.bytecode_env: app = self.bytecode_env.append_value app('OCAMLPATH', '-I') app('OCAMLPATH', i.srcpath(self.env)) app('OCAMLPATH', '-I') app('OCAMLPATH', i.bldpath(self.env)) if self.native_env: app = self.native_env.append_value app('OCAMLPATH', '-I') app('OCAMLPATH', i.bldpath(self.env)) app('OCAMLPATH', '-I') app('OCAMLPATH', i.srcpath(self.env)) varnames = ['INCLUDES', 'OCAMLFLAGS', 'OCALINKFLAGS', 'OCALINKFLAGS_OPT'] for name in self.uselib.split(): for vname in varnames: cnt = self.env[vname+'_'+name] if cnt: if self.bytecode_env: self.bytecode_env.append_value(vname, cnt) if self.native_env: self.native_env.append_value(vname, cnt) @feature('ocaml') @after('apply_core') def apply_link_ml(self): if self.bytecode_env: ext = self.islibrary and '.cma' or '.run' linktask = self.create_task('ocalink') linktask.bytecode = 1 linktask.set_outputs(self.path.find_or_declare(self.target + ext)) linktask.obj = self linktask.env = self.bytecode_env self.linktasks.append(linktask) if self.native_env: if self.type == 'c_object': ext = '.o' elif self.islibrary: ext = '.cmxa' else: ext = '' linktask = self.create_task('ocalinkx') linktask.set_outputs(self.path.find_or_declare(self.target + ext)) linktask.obj = self linktask.env = self.native_env self.linktasks.append(linktask) # we produce a .o file to be used by gcc self.compiled_tasks.append(linktask) @extension(EXT_MLL) def mll_hook(self, node): mll_task = 
self.create_task('ocamllex', node, node.change_ext('.ml'), env=self.native_env) self.mlltasks.append(mll_task) self.allnodes.append(mll_task.outputs[0]) @extension(EXT_MLY) def mly_hook(self, node): mly_task = self.create_task('ocamlyacc', node, [node.change_ext('.ml'), node.change_ext('.mli')], env=self.native_env) self.mlytasks.append(mly_task) self.allnodes.append(mly_task.outputs[0]) task = self.create_task('ocamlcmi', mly_task.outputs[1], mly_task.outputs[1].change_ext('.cmi'), env=self.native_env) @extension(EXT_MLI) def mli_hook(self, node): task = self.create_task('ocamlcmi', node, node.change_ext('.cmi'), env=self.native_env) self.mlitasks.append(task) @extension(EXT_MLC) def mlc_hook(self, node): task = self.create_task('ocamlcc', node, node.change_ext('.o'), env=self.native_env) self.compiled_tasks.append(task) @extension(EXT_ML) def ml_hook(self, node): if self.native_env: task = self.create_task('ocamlx', node, node.change_ext('.cmx'), env=self.native_env) task.obj = self task.incpaths = self.bld_incpaths_lst self.native_tasks.append(task) if self.bytecode_env: task = self.create_task('ocaml', node, node.change_ext('.cmo'), env=self.bytecode_env) task.obj = self task.bytecode = 1 task.incpaths = self.bld_incpaths_lst self.bytecode_tasks.append(task) def compile_may_start(self): if not getattr(self, 'flag_deps', ''): self.flag_deps = 1 # the evil part is that we can only compute the dependencies after the # source files can be read (this means actually producing the source files) if getattr(self, 'bytecode', ''): alltasks = self.obj.bytecode_tasks else: alltasks = self.obj.native_tasks self.signature() # ensure that files are scanned - unfortunately tree = self.generator.bld env = self.env for node in self.inputs: lst = tree.node_deps[self.unique_id()] for depnode in lst: for t in alltasks: if t == self: continue if depnode in t.inputs: self.set_run_after(t) # TODO necessary to get the signature right - for now delattr(self, 'cache_sig') 
self.signature() return Task.Task.runnable_status(self) b = Task.simple_task_type cls = b('ocamlx', '${OCAMLOPT} ${OCAMLPATH} ${OCAMLFLAGS} ${INCLUDES} -c -o ${TGT} ${SRC}', color='GREEN', shell=False) cls.runnable_status = compile_may_start cls.scan = scan b = Task.simple_task_type cls = b('ocaml', '${OCAMLC} ${OCAMLPATH} ${OCAMLFLAGS} ${INCLUDES} -c -o ${TGT} ${SRC}', color='GREEN', shell=False) cls.runnable_status = compile_may_start cls.scan = scan b('ocamlcmi', '${OCAMLC} ${OCAMLPATH} ${INCLUDES} -o ${TGT} -c ${SRC}', color='BLUE', before="ocaml ocamlcc ocamlx") b('ocamlcc', 'cd ${TGT[0].bld_dir(env)} && ${OCAMLOPT} ${OCAMLFLAGS} ${OCAMLPATH} ${INCLUDES} -c ${SRC[0].abspath(env)}', color='GREEN') b('ocamllex', '${OCAMLLEX} ${SRC} -o ${TGT}', color='BLUE', before="ocamlcmi ocaml ocamlcc") b('ocamlyacc', '${OCAMLYACC} -b ${TGT[0].bld_base(env)} ${SRC}', color='BLUE', before="ocamlcmi ocaml ocamlcc") def link_may_start(self): if not getattr(self, 'order', ''): # now reorder the inputs given the task dependencies if getattr(self, 'bytecode', 0): alltasks = self.obj.bytecode_tasks else: alltasks = self.obj.native_tasks # this part is difficult, we do not have a total order on the tasks # if the dependencies are wrong, this may not stop seen = [] pendant = []+alltasks while pendant: task = pendant.pop(0) if task in seen: continue for x in task.run_after: if not x in seen: pendant.append(task) break else: seen.append(task) self.inputs = [x.outputs[0] for x in seen] self.order = 1 return Task.Task.runnable_status(self) act = b('ocalink', '${OCAMLC} -o ${TGT} ${INCLUDES} ${OCALINKFLAGS} ${SRC}', color='YELLOW', after="ocaml ocamlcc") act.runnable_status = link_may_start act = b('ocalinkx', '${OCAMLOPT} -o ${TGT} ${INCLUDES} ${OCALINKFLAGS_OPT} ${SRC}', color='YELLOW', after="ocamlx ocamlcc") act.runnable_status = link_may_start def detect(conf): opt = conf.find_program('ocamlopt', var='OCAMLOPT') occ = conf.find_program('ocamlc', var='OCAMLC') if (not opt) or (not 
occ): conf.fatal('The objective caml compiler was not found:\ninstall it or make it available in your PATH') v = conf.env v['OCAMLC'] = occ v['OCAMLOPT'] = opt v['OCAMLLEX'] = conf.find_program('ocamllex', var='OCAMLLEX') v['OCAMLYACC'] = conf.find_program('ocamlyacc', var='OCAMLYACC') v['OCAMLFLAGS'] = '' v['OCAMLLIB'] = Utils.cmd_output(conf.env['OCAMLC']+' -where').strip()+os.sep v['LIBPATH_OCAML'] = Utils.cmd_output(conf.env['OCAMLC']+' -where').strip()+os.sep v['CPPPATH_OCAML'] = Utils.cmd_output(conf.env['OCAMLC']+' -where').strip()+os.sep v['LIB_OCAML'] = 'camlrun' ntdb-1.0/buildtools/wafadmin/Tools/osx.py000066400000000000000000000132731224151530700205400ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy 2008 """MacOSX related tools To compile an executable into a Mac application bundle (a .app), set its 'mac_app' attribute obj.mac_app = True To make a bundled shared library (a .bundle), set the 'mac_bundle' attribute: obj.mac_bundle = True """ import os, shutil, sys, platform import TaskGen, Task, Build, Options, Utils from TaskGen import taskgen, feature, after, before from Logs import error, debug # plist template app_info = ''' CFBundlePackageType APPL CFBundleGetInfoString Created by Waf CFBundleSignature ???? 
NOTE THIS IS A GENERATED FILE, DO NOT MODIFY CFBundleExecutable %s ''' # see WAF issue 285 # and also http://trac.macports.org/ticket/17059 @feature('cc', 'cxx') @before('apply_lib_vars') def set_macosx_deployment_target(self): if self.env['MACOSX_DEPLOYMENT_TARGET']: os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env['MACOSX_DEPLOYMENT_TARGET'] elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ: if sys.platform == 'darwin': os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2]) @feature('cc', 'cxx') @after('apply_lib_vars') def apply_framework(self): for x in self.to_list(self.env['FRAMEWORKPATH']): frameworkpath_st = '-F%s' self.env.append_unique('CXXFLAGS', frameworkpath_st % x) self.env.append_unique('CCFLAGS', frameworkpath_st % x) self.env.append_unique('LINKFLAGS', frameworkpath_st % x) for x in self.to_list(self.env['FRAMEWORK']): self.env.append_value('LINKFLAGS', ['-framework', x]) @taskgen def create_bundle_dirs(self, name, out): bld = self.bld dir = out.parent.get_dir(name) if not dir: dir = out.__class__(name, out.parent, 1) bld.rescan(dir) contents = out.__class__('Contents', dir, 1) bld.rescan(contents) macos = out.__class__('MacOS', contents, 1) bld.rescan(macos) return dir def bundle_name_for_output(out): name = out.name k = name.rfind('.') if k >= 0: name = name[:k] + '.app' else: name = name + '.app' return name @taskgen @after('apply_link') @feature('cprogram') def create_task_macapp(self): """Use env['MACAPP'] to force *all* executables to be transformed into Mac applications or use obj.mac_app = True to build specific targets as Mac apps""" if self.env['MACAPP'] or getattr(self, 'mac_app', False): apptask = self.create_task('macapp') apptask.set_inputs(self.link_task.outputs) out = self.link_task.outputs[0] name = bundle_name_for_output(out) dir = self.create_bundle_dirs(name, out) n1 = dir.find_or_declare(['Contents', 'MacOS', out.name]) apptask.set_outputs([n1]) apptask.chmod = 0755 apptask.install_path = 
os.path.join(self.install_path, name, 'Contents', 'MacOS') self.apptask = apptask @after('apply_link') @feature('cprogram') def create_task_macplist(self): """Use env['MACAPP'] to force *all* executables to be transformed into Mac applications or use obj.mac_app = True to build specific targets as Mac apps""" if self.env['MACAPP'] or getattr(self, 'mac_app', False): # check if the user specified a plist before using our template if not getattr(self, 'mac_plist', False): self.mac_plist = app_info plisttask = self.create_task('macplist') plisttask.set_inputs(self.link_task.outputs) out = self.link_task.outputs[0] self.mac_plist = self.mac_plist % (out.name) name = bundle_name_for_output(out) dir = self.create_bundle_dirs(name, out) n1 = dir.find_or_declare(['Contents', 'Info.plist']) plisttask.set_outputs([n1]) plisttask.mac_plist = self.mac_plist plisttask.install_path = os.path.join(self.install_path, name, 'Contents') self.plisttask = plisttask @after('apply_link') @feature('cshlib') def apply_link_osx(self): name = self.link_task.outputs[0].name if not self.install_path: return if getattr(self, 'vnum', None): name = name.replace('.dylib', '.%s.dylib' % self.vnum) path = os.path.join(Utils.subst_vars(self.install_path, self.env), name) if '-dynamiclib' in self.env['LINKFLAGS']: self.env.append_value('LINKFLAGS', '-install_name') self.env.append_value('LINKFLAGS', path) @before('apply_link', 'apply_lib_vars') @feature('cc', 'cxx') def apply_bundle(self): """use env['MACBUNDLE'] to force all shlibs into mac bundles or use obj.mac_bundle = True for specific targets only""" if not ('cshlib' in self.features or 'shlib' in self.features): return if self.env['MACBUNDLE'] or getattr(self, 'mac_bundle', False): self.env['shlib_PATTERN'] = self.env['macbundle_PATTERN'] uselib = self.uselib = self.to_list(self.uselib) if not 'MACBUNDLE' in uselib: uselib.append('MACBUNDLE') @after('apply_link') @feature('cshlib') def apply_bundle_remove_dynamiclib(self): if 
self.env['MACBUNDLE'] or getattr(self, 'mac_bundle', False): if not getattr(self, 'vnum', None): try: self.env['LINKFLAGS'].remove('-dynamiclib') self.env['LINKFLAGS'].remove('-single_module') except ValueError: pass # TODO REMOVE IN 1.6 (global variable) app_dirs = ['Contents', 'Contents/MacOS', 'Contents/Resources'] def app_build(task): env = task.env shutil.copy2(task.inputs[0].srcpath(env), task.outputs[0].abspath(env)) return 0 def plist_build(task): env = task.env f = open(task.outputs[0].abspath(env), "w") f.write(task.mac_plist) f.close() return 0 Task.task_type_from_func('macapp', vars=[], func=app_build, after="cxx_link cc_link static_link") Task.task_type_from_func('macplist', vars=[], func=plist_build, after="cxx_link cc_link static_link") ntdb-1.0/buildtools/wafadmin/Tools/perl.py000066400000000000000000000067531224151530700206760ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # andersg at 0x63.nu 2007 import os import Task, Options, Utils from Configure import conf from TaskGen import extension, taskgen, feature, before xsubpp_str = '${PERL} ${XSUBPP} -noprototypes -typemap ${EXTUTILS_TYPEMAP} ${SRC} > ${TGT}' EXT_XS = ['.xs'] @before('apply_incpaths', 'apply_type_vars', 'apply_lib_vars') @feature('perlext') def init_perlext(self): self.uselib = self.to_list(getattr(self, 'uselib', '')) if not 'PERL' in self.uselib: self.uselib.append('PERL') if not 'PERLEXT' in self.uselib: self.uselib.append('PERLEXT') self.env['shlib_PATTERN'] = self.env['perlext_PATTERN'] @extension(EXT_XS) def xsubpp_file(self, node): outnode = node.change_ext('.c') self.create_task('xsubpp', node, outnode) self.allnodes.append(outnode) Task.simple_task_type('xsubpp', xsubpp_str, color='BLUE', before='cc cxx', shell=False) @conf def check_perl_version(conf, minver=None): """ Checks if perl is installed. If installed the variable PERL will be set in environment. 
Perl binary can be overridden by --with-perl-binary config variable """ if getattr(Options.options, 'perlbinary', None): conf.env.PERL = Options.options.perlbinary else: conf.find_program('perl', var='PERL', mandatory=True) try: version = Utils.cmd_output([conf.env.PERL, '-e', 'printf "%vd",$^V']) except: conf.fatal('could not determine the perl version') conf.env.PERL_VERSION = version cver = '' if minver: try: ver = tuple(map(int, version.split('.'))) except: conf.fatal('unsupported perl version %r' % version) if ver < minver: conf.fatal('perl is too old') cver = '.'.join(map(str,minver)) conf.check_message('perl', cver, True, version) @conf def check_perl_module(conf, module): """ Check if specified perlmodule is installed. Minimum version can be specified by specifying it after modulename like this: conf.check_perl_module("Some::Module 2.92") """ cmd = [conf.env['PERL'], '-e', 'use %s' % module] r = Utils.pproc.call(cmd, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE) == 0 conf.check_message("perl module %s" % module, "", r) return r @conf def check_perl_ext_devel(conf): """ Check for configuration needed to build perl extensions. Sets different xxx_PERLEXT variables in the environment. Also sets the ARCHDIR_PERL variable useful as installation path, which can be overridden by --with-perl-archdir """ if not conf.env.PERL: conf.fatal('perl detection is required first') def read_out(cmd): return Utils.to_list(Utils.cmd_output([conf.env.PERL, '-MConfig', '-e', cmd])) conf.env.LINKFLAGS_PERLEXT = read_out('print $Config{lddlflags}') conf.env.CPPPATH_PERLEXT = read_out('print "$Config{archlib}/CORE"') conf.env.CCFLAGS_PERLEXT = read_out('print "$Config{ccflags} $Config{cccdlflags}"') conf.env.XSUBPP = read_out('print "$Config{privlib}/ExtUtils/xsubpp$Config{exe_ext}"') conf.env.EXTUTILS_TYPEMAP = read_out('print "$Config{privlib}/ExtUtils/typemap"') conf.env.perlext_PATTERN = '%s.' 
+ read_out('print $Config{dlext}')[0] if getattr(Options.options, 'perlarchdir', None): conf.env.ARCHDIR_PERL = Options.options.perlarchdir else: conf.env.ARCHDIR_PERL = read_out('print $Config{sitearch}')[0] def set_options(opt): opt.add_option("--with-perl-binary", type="string", dest="perlbinary", help = 'Specify alternate perl binary', default=None) opt.add_option("--with-perl-archdir", type="string", dest="perlarchdir", help = 'Specify directory where to install arch specific files', default=None) ntdb-1.0/buildtools/wafadmin/Tools/preproc.py000066400000000000000000000525641224151530700214070ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2009 (ita) """ C/C++ preprocessor for finding dependencies Reasons for using the Waf preprocessor by default 1. Some c/c++ extensions (Qt) require a custom preprocessor for obtaining the dependencies (.moc files) 2. Not all compilers provide .d files for obtaining the dependencies (portability) 3. A naive file scanner will not catch the constructs such as "#include foo()" 4. A naive file scanner will catch unnecessary dependencies (change an unused header -> recompile everything) Regarding the speed concerns: a. the preprocessing is performed only when files must be compiled b. the macros are evaluated only for #if/#elif/#include c. the time penalty is about 10% d. system headers are not scanned Now if you do not want the Waf preprocessor, the tool "gccdeps" uses the .d files produced during the compilation to track the dependencies (useful when used with the boost libraries). It only works with gcc though, and it cannot be used with Qt builds. A dumb file scanner will be added in the future, so we will have most bahaviours. 
""" # TODO: more varargs, pragma once # TODO: dumb file scanner tracking all includes import re, sys, os, string import Logs, Build, Utils from Logs import debug, error import traceback class PreprocError(Utils.WafError): pass POPFILE = '-' recursion_limit = 5000 "do not loop too much on header inclusion" go_absolute = 0 "set to 1 to track headers on files in /usr/include - else absolute paths are ignored" standard_includes = ['/usr/include'] if sys.platform == "win32": standard_includes = [] use_trigraphs = 0 'apply the trigraph rules first' strict_quotes = 0 "Keep <> for system includes (do not search for those includes)" g_optrans = { 'not':'!', 'and':'&&', 'bitand':'&', 'and_eq':'&=', 'or':'||', 'bitor':'|', 'or_eq':'|=', 'xor':'^', 'xor_eq':'^=', 'compl':'~', } "these ops are for c++, to reset, set an empty dict" # ignore #warning and #error re_lines = re.compile(\ '^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*)\r*$', re.IGNORECASE | re.MULTILINE) re_mac = re.compile("^[a-zA-Z_]\w*") re_fun = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*[(]') re_pragma_once = re.compile('^\s*once\s*', re.IGNORECASE) re_nl = re.compile('\\\\\r*\n', re.MULTILINE) re_cpp = re.compile( r"""(/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)|//[^\n]*|("(?:\\.|[^"\\])*"|'(?:\\.|[^'\\])*'|.[^/"'\\]*)""", re.MULTILINE) trig_def = [('??'+a, b) for a, b in zip("=-/!'()<>", r'#~\|^[]{}')] chr_esc = {'0':0, 'a':7, 'b':8, 't':9, 'n':10, 'f':11, 'v':12, 'r':13, '\\':92, "'":39} NUM = 'i' OP = 'O' IDENT = 'T' STR = 's' CHAR = 'c' tok_types = [NUM, STR, IDENT, OP] exp_types = [ r"""0[xX](?P[a-fA-F0-9]+)(?P[uUlL]*)|L*?'(?P(\\.|[^\\'])+)'|(?P\d+)[Ee](?P[+-]*?\d+)(?P[fFlL]*)|(?P\d*\.\d+)([Ee](?P[+-]*?\d+))?(?P[fFlL]*)|(?P\d+\.\d*)([Ee](?P[+-]*?\d+))?(?P[fFlL]*)|(?P0*)(?P\d+)(?P[uUlL]*)""", r'L?"([^"\\]|\\.)*"', r'[a-zA-Z_]\w*', 
r'%:%:|<<=|>>=|\.\.\.|<<|<%|<:|<=|>>|>=|\+\+|\+=|--|->|-=|\*=|/=|%:|%=|%>|==|&&|&=|\|\||\|=|\^=|:>|!=|##|[\(\)\{\}\[\]<>\?\|\^\*\+&=:!#;,%/\-\?\~\.]', ] re_clexer = re.compile('|'.join(["(?P<%s>%s)" % (name, part) for name, part in zip(tok_types, exp_types)]), re.M) accepted = 'a' ignored = 'i' undefined = 'u' skipped = 's' def repl(m): if m.group(1): return ' ' s = m.group(2) if s is None: return '' return s def filter_comments(filename): # return a list of tuples : keyword, line code = Utils.readf(filename) if use_trigraphs: for (a, b) in trig_def: code = code.split(a).join(b) code = re_nl.sub('', code) code = re_cpp.sub(repl, code) return [(m.group(2), m.group(3)) for m in re.finditer(re_lines, code)] prec = {} # op -> number, needed for such expressions: #if 1 && 2 != 0 ops = ['* / %', '+ -', '<< >>', '< <= >= >', '== !=', '& | ^', '&& ||', ','] for x in range(len(ops)): syms = ops[x] for u in syms.split(): prec[u] = x def reduce_nums(val_1, val_2, val_op): """apply arithmetic rules and try to return an integer result""" #print val_1, val_2, val_op # now perform the operation, make certain a and b are numeric try: a = 0 + val_1 except TypeError: a = int(val_1) try: b = 0 + val_2 except TypeError: b = int(val_2) d = val_op if d == '%': c = a%b elif d=='+': c = a+b elif d=='-': c = a-b elif d=='*': c = a*b elif d=='/': c = a/b elif d=='^': c = a^b elif d=='|': c = a|b elif d=='||': c = int(a or b) elif d=='&': c = a&b elif d=='&&': c = int(a and b) elif d=='==': c = int(a == b) elif d=='!=': c = int(a != b) elif d=='<=': c = int(a <= b) elif d=='<': c = int(a < b) elif d=='>': c = int(a > b) elif d=='>=': c = int(a >= b) elif d=='^': c = int(a^b) elif d=='<<': c = a<>': c = a>>b else: c = 0 return c def get_num(lst): if not lst: raise PreprocError("empty list for get_num") (p, v) = lst[0] if p == OP: if v == '(': count_par = 1 i = 1 while i < len(lst): (p, v) = lst[i] if p == OP: if v == ')': count_par -= 1 if count_par == 0: break elif v == '(': count_par += 1 i 
+= 1 else: raise PreprocError("rparen expected %r" % lst) (num, _) = get_term(lst[1:i]) return (num, lst[i+1:]) elif v == '+': return get_num(lst[1:]) elif v == '-': num, lst = get_num(lst[1:]) return (reduce_nums('-1', num, '*'), lst) elif v == '!': num, lst = get_num(lst[1:]) return (int(not int(num)), lst) elif v == '~': return (~ int(num), lst) else: raise PreprocError("invalid op token %r for get_num" % lst) elif p == NUM: return v, lst[1:] elif p == IDENT: # all macros should have been replaced, remaining identifiers eval to 0 return 0, lst[1:] else: raise PreprocError("invalid token %r for get_num" % lst) def get_term(lst): if not lst: raise PreprocError("empty list for get_term") num, lst = get_num(lst) if not lst: return (num, []) (p, v) = lst[0] if p == OP: if v == '&&' and not num: return (num, []) elif v == '||' and num: return (num, []) elif v == ',': # skip return get_term(lst[1:]) elif v == '?': count_par = 0 i = 1 while i < len(lst): (p, v) = lst[i] if p == OP: if v == ')': count_par -= 1 elif v == '(': count_par += 1 elif v == ':': if count_par == 0: break i += 1 else: raise PreprocError("rparen expected %r" % lst) if int(num): return get_term(lst[1:i]) else: return get_term(lst[i+1:]) else: num2, lst = get_num(lst[1:]) if not lst: # no more tokens to process num2 = reduce_nums(num, num2, v) return get_term([(NUM, num2)] + lst) # operator precedence p2, v2 = lst[0] if p2 != OP: raise PreprocError("op expected %r" % lst) if prec[v2] >= prec[v]: num2 = reduce_nums(num, num2, v) return get_term([(NUM, num2)] + lst) else: num3, lst = get_num(lst[1:]) num3 = reduce_nums(num2, num3, v2) return get_term([(NUM, num), (p, v), (NUM, num3)] + lst) raise PreprocError("cannot reduce %r" % lst) def reduce_eval(lst): """take a list of tokens and output true or false (#if/#elif conditions)""" num, lst = get_term(lst) return (NUM, num) def stringize(lst): """use for converting a list of tokens to a string""" lst = [str(v2) for (p2, v2) in lst] return "".join(lst) 
def paste_tokens(t1, t2): """ here is what we can paste: a ## b -> ab > ## = -> >= a ## 2 -> a2 """ p1 = None if t1[0] == OP and t2[0] == OP: p1 = OP elif t1[0] == IDENT and (t2[0] == IDENT or t2[0] == NUM): p1 = IDENT elif t1[0] == NUM and t2[0] == NUM: p1 = NUM if not p1: raise PreprocError('tokens do not make a valid paste %r and %r' % (t1, t2)) return (p1, t1[1] + t2[1]) def reduce_tokens(lst, defs, ban=[]): """replace the tokens in lst, using the macros provided in defs, and a list of macros that cannot be re-applied""" i = 0 while i < len(lst): (p, v) = lst[i] if p == IDENT and v == "defined": del lst[i] if i < len(lst): (p2, v2) = lst[i] if p2 == IDENT: if v2 in defs: lst[i] = (NUM, 1) else: lst[i] = (NUM, 0) elif p2 == OP and v2 == '(': del lst[i] (p2, v2) = lst[i] del lst[i] # remove the ident, and change the ) for the value if v2 in defs: lst[i] = (NUM, 1) else: lst[i] = (NUM, 0) else: raise PreprocError("invalid define expression %r" % lst) elif p == IDENT and v in defs: if isinstance(defs[v], str): a, b = extract_macro(defs[v]) defs[v] = b macro_def = defs[v] to_add = macro_def[1] if isinstance(macro_def[0], list): # macro without arguments del lst[i] for x in xrange(len(to_add)): lst.insert(i, to_add[x]) i += 1 else: # collect the arguments for the funcall args = [] del lst[i] if i >= len(lst): raise PreprocError("expected '(' after %r (got nothing)" % v) (p2, v2) = lst[i] if p2 != OP or v2 != '(': raise PreprocError("expected '(' after %r" % v) del lst[i] one_param = [] count_paren = 0 while i < len(lst): p2, v2 = lst[i] del lst[i] if p2 == OP and count_paren == 0: if v2 == '(': one_param.append((p2, v2)) count_paren += 1 elif v2 == ')': if one_param: args.append(one_param) break elif v2 == ',': if not one_param: raise PreprocError("empty param in funcall %s" % p) args.append(one_param) one_param = [] else: one_param.append((p2, v2)) else: one_param.append((p2, v2)) if v2 == '(': count_paren += 1 elif v2 == ')': count_paren -= 1 else: raise 
PreprocError('malformed macro') # substitute the arguments within the define expression accu = [] arg_table = macro_def[0] j = 0 while j < len(to_add): (p2, v2) = to_add[j] if p2 == OP and v2 == '#': # stringize is for arguments only if j+1 < len(to_add) and to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table: toks = args[arg_table[to_add[j+1][1]]] accu.append((STR, stringize(toks))) j += 1 else: accu.append((p2, v2)) elif p2 == OP and v2 == '##': # token pasting, how can man invent such a complicated system? if accu and j+1 < len(to_add): # we have at least two tokens t1 = accu[-1] if to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table: toks = args[arg_table[to_add[j+1][1]]] if toks: accu[-1] = paste_tokens(t1, toks[0]) #(IDENT, accu[-1][1] + toks[0][1]) accu.extend(toks[1:]) else: # error, case "a##" accu.append((p2, v2)) accu.extend(toks) elif to_add[j+1][0] == IDENT and to_add[j+1][1] == '__VA_ARGS__': # TODO not sure # first collect the tokens va_toks = [] st = len(macro_def[0]) pt = len(args) for x in args[pt-st+1:]: va_toks.extend(x) va_toks.append((OP, ',')) if va_toks: va_toks.pop() # extra comma if len(accu)>1: (p3, v3) = accu[-1] (p4, v4) = accu[-2] if v3 == '##': # remove the token paste accu.pop() if v4 == ',' and pt < st: # remove the comma accu.pop() accu += va_toks else: accu[-1] = paste_tokens(t1, to_add[j+1]) j += 1 else: # invalid paste, case "##a" or "b##" accu.append((p2, v2)) elif p2 == IDENT and v2 in arg_table: toks = args[arg_table[v2]] reduce_tokens(toks, defs, ban+[v]) accu.extend(toks) else: accu.append((p2, v2)) j += 1 reduce_tokens(accu, defs, ban+[v]) for x in xrange(len(accu)-1, -1, -1): lst.insert(i, accu[x]) i += 1 def eval_macro(lst, adefs): """reduce the tokens from the list lst, and try to return a 0/1 result""" reduce_tokens(lst, adefs, []) if not lst: raise PreprocError("missing tokens to evaluate") (p, v) = reduce_eval(lst) return int(v) != 0 def extract_macro(txt): """process a macro definition from "#define f(x, y) 
x * y" into a function or a simple macro without arguments""" t = tokenize(txt) if re_fun.search(txt): p, name = t[0] p, v = t[1] if p != OP: raise PreprocError("expected open parenthesis") i = 1 pindex = 0 params = {} prev = '(' while 1: i += 1 p, v = t[i] if prev == '(': if p == IDENT: params[v] = pindex pindex += 1 prev = p elif p == OP and v == ')': break else: raise PreprocError("unexpected token (3)") elif prev == IDENT: if p == OP and v == ',': prev = v elif p == OP and v == ')': break else: raise PreprocError("comma or ... expected") elif prev == ',': if p == IDENT: params[v] = pindex pindex += 1 prev = p elif p == OP and v == '...': raise PreprocError("not implemented (1)") else: raise PreprocError("comma or ... expected (2)") elif prev == '...': raise PreprocError("not implemented (2)") else: raise PreprocError("unexpected else") #~ print (name, [params, t[i+1:]]) return (name, [params, t[i+1:]]) else: (p, v) = t[0] return (v, [[], t[1:]]) re_include = re.compile('^\s*(<(?P.*)>|"(?P.*)")') def extract_include(txt, defs): """process a line in the form "#include foo" to return a string representing the file""" m = re_include.search(txt) if m: if m.group('a'): return '<', m.group('a') if m.group('b'): return '"', m.group('b') # perform preprocessing and look at the result, it must match an include toks = tokenize(txt) reduce_tokens(toks, defs, ['waf_include']) if not toks: raise PreprocError("could not parse include %s" % txt) if len(toks) == 1: if toks[0][0] == STR: return '"', toks[0][1] else: if toks[0][1] == '<' and toks[-1][1] == '>': return stringize(toks).lstrip('<').rstrip('>') raise PreprocError("could not parse include %s." 
% txt) def parse_char(txt): if not txt: raise PreprocError("attempted to parse a null char") if txt[0] != '\\': return ord(txt) c = txt[1] if c == 'x': if len(txt) == 4 and txt[3] in string.hexdigits: return int(txt[2:], 16) return int(txt[2:], 16) elif c.isdigit(): if c == '0' and len(txt)==2: return 0 for i in 3, 2, 1: if len(txt) > i and txt[1:1+i].isdigit(): return (1+i, int(txt[1:1+i], 8)) else: try: return chr_esc[c] except KeyError: raise PreprocError("could not parse char literal '%s'" % txt) @Utils.run_once def tokenize_private(s): ret = [] for match in re_clexer.finditer(s): m = match.group for name in tok_types: v = m(name) if v: if name == IDENT: try: v = g_optrans[v]; name = OP except KeyError: # c++ specific if v.lower() == "true": v = 1 name = NUM elif v.lower() == "false": v = 0 name = NUM elif name == NUM: if m('oct'): v = int(v, 8) elif m('hex'): v = int(m('hex'), 16) elif m('n0'): v = m('n0') else: v = m('char') if v: v = parse_char(v) else: v = m('n2') or m('n4') elif name == OP: if v == '%:': v = '#' elif v == '%:%:': v = '##' elif name == STR: # remove the quotes around the string v = v[1:-1] ret.append((name, v)) break return ret def tokenize(s): """convert a string into a list of tokens (shlex.split does not apply to c/c++/d)""" return tokenize_private(s)[:] @Utils.run_once def define_name(line): return re_mac.match(line).group(0) class c_parser(object): def __init__(self, nodepaths=None, defines=None): #self.lines = txt.split('\n') self.lines = [] if defines is None: self.defs = {} else: self.defs = dict(defines) # make a copy self.state = [] self.env = None # needed for the variant when searching for files self.count_files = 0 self.currentnode_stack = [] self.nodepaths = nodepaths or [] self.nodes = [] self.names = [] # file added self.curfile = '' self.ban_includes = set([]) def cached_find_resource(self, node, filename): try: nd = node.bld.cache_nd except: nd = node.bld.cache_nd = {} tup = (node.id, filename) try: return nd[tup] except 
KeyError: ret = node.find_resource(filename) nd[tup] = ret return ret def tryfind(self, filename): self.curfile = filename # for msvc it should be a for loop on the whole stack found = self.cached_find_resource(self.currentnode_stack[-1], filename) for n in self.nodepaths: if found: break found = self.cached_find_resource(n, filename) if found: self.nodes.append(found) if filename[-4:] != '.moc': self.addlines(found) else: if not filename in self.names: self.names.append(filename) return found def addlines(self, node): self.currentnode_stack.append(node.parent) filepath = node.abspath(self.env) self.count_files += 1 if self.count_files > recursion_limit: raise PreprocError("recursion limit exceeded") pc = self.parse_cache debug('preproc: reading file %r', filepath) try: lns = pc[filepath] except KeyError: pass else: self.lines.extend(lns) return try: lines = filter_comments(filepath) lines.append((POPFILE, '')) lines.reverse() pc[filepath] = lines # cache the lines filtered self.lines.extend(lines) except IOError: raise PreprocError("could not read the file %s" % filepath) except Exception: if Logs.verbose > 0: error("parsing %s failed" % filepath) traceback.print_exc() def start(self, node, env): debug('preproc: scanning %s (in %s)', node.name, node.parent.name) self.env = env variant = node.variant(env) bld = node.__class__.bld try: self.parse_cache = bld.parse_cache except AttributeError: bld.parse_cache = {} self.parse_cache = bld.parse_cache self.addlines(node) if env['DEFLINES']: lst = [('define', x) for x in env['DEFLINES']] lst.reverse() self.lines.extend(lst) while self.lines: (kind, line) = self.lines.pop() if kind == POPFILE: self.currentnode_stack.pop() continue try: self.process_line(kind, line) except Exception, e: if Logs.verbose: debug('preproc: line parsing failed (%s): %s %s', e, line, Utils.ex_stack()) def process_line(self, token, line): """ WARNING: a new state must be added for if* because the endif """ ve = Logs.verbose if ve: debug('preproc: 
line is %s - %s state is %s', token, line, self.state) state = self.state # make certain we define the state if we are about to enter in an if block if token in ['ifdef', 'ifndef', 'if']: state.append(undefined) elif token == 'endif': state.pop() # skip lines when in a dead 'if' branch, wait for the endif if not token in ['else', 'elif', 'endif']: if skipped in self.state or ignored in self.state: return if token == 'if': ret = eval_macro(tokenize(line), self.defs) if ret: state[-1] = accepted else: state[-1] = ignored elif token == 'ifdef': m = re_mac.match(line) if m and m.group(0) in self.defs: state[-1] = accepted else: state[-1] = ignored elif token == 'ifndef': m = re_mac.match(line) if m and m.group(0) in self.defs: state[-1] = ignored else: state[-1] = accepted elif token == 'include' or token == 'import': (kind, inc) = extract_include(line, self.defs) if inc in self.ban_includes: return if token == 'import': self.ban_includes.add(inc) if ve: debug('preproc: include found %s (%s) ', inc, kind) if kind == '"' or not strict_quotes: self.tryfind(inc) elif token == 'elif': if state[-1] == accepted: state[-1] = skipped elif state[-1] == ignored: if eval_macro(tokenize(line), self.defs): state[-1] = accepted elif token == 'else': if state[-1] == accepted: state[-1] = skipped elif state[-1] == ignored: state[-1] = accepted elif token == 'define': try: self.defs[define_name(line)] = line except: raise PreprocError("invalid define line %s" % line) elif token == 'undef': m = re_mac.match(line) if m and m.group(0) in self.defs: self.defs.__delitem__(m.group(0)) #print "undef %s" % name elif token == 'pragma': if re_pragma_once.match(line.lower()): self.ban_includes.add(self.curfile) def get_deps(node, env, nodepaths=[]): """ Get the dependencies using a c/c++ preprocessor, this is required for finding dependencies of the kind #include some_macro() """ gruik = c_parser(nodepaths) gruik.start(node, env) return (gruik.nodes, gruik.names) #################### dumb 
dependency scanner re_inc = re.compile(\ '^[ \t]*(#|%:)[ \t]*(include)[ \t]*(.*)\r*$', re.IGNORECASE | re.MULTILINE) def lines_includes(filename): code = Utils.readf(filename) if use_trigraphs: for (a, b) in trig_def: code = code.split(a).join(b) code = re_nl.sub('', code) code = re_cpp.sub(repl, code) return [(m.group(2), m.group(3)) for m in re.finditer(re_inc, code)] def get_deps_simple(node, env, nodepaths=[], defines={}): """ Get the dependencies by just looking recursively at the #include statements """ nodes = [] names = [] def find_deps(node): lst = lines_includes(node.abspath(env)) for (_, line) in lst: (t, filename) = extract_include(line, defines) if filename in names: continue if filename.endswith('.moc'): names.append(filename) found = None for n in nodepaths: if found: break found = n.find_resource(filename) if not found: if not filename in names: names.append(filename) elif not found in nodes: nodes.append(found) find_deps(node) find_deps(node) return (nodes, names) ntdb-1.0/buildtools/wafadmin/Tools/python.py000066400000000000000000000332771224151530700212560ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2007 (ita) # Gustavo Carneiro (gjc), 2007 "Python support" import os, sys import TaskGen, Utils, Options from Logs import debug, warn, info from TaskGen import extension, before, after, feature from Configure import conf from config_c import parse_flags EXT_PY = ['.py'] FRAG_2 = ''' #include "Python.h" #ifdef __cplusplus extern "C" { #endif void Py_Initialize(void); void Py_Finalize(void); #ifdef __cplusplus } #endif int main() { Py_Initialize(); Py_Finalize(); return 0; } ''' @feature('pyext') @before('apply_incpaths', 'apply_lib_vars', 'apply_type_vars', 'apply_bundle') @after('vars_target_cshlib') def init_pyext(self): self.default_install_path = '${PYTHONARCHDIR}' self.uselib = self.to_list(getattr(self, 'uselib', '')) if not 'PYEXT' in self.uselib: self.uselib.append('PYEXT') self.env['MACBUNDLE'] = True 
@before('apply_link', 'apply_lib_vars', 'apply_type_vars') @after('apply_bundle') @feature('pyext') def pyext_shlib_ext(self): # override shlib_PATTERN set by the osx module self.env['shlib_PATTERN'] = self.env['pyext_PATTERN'] @before('apply_incpaths', 'apply_lib_vars', 'apply_type_vars') @feature('pyembed') def init_pyembed(self): self.uselib = self.to_list(getattr(self, 'uselib', '')) if not 'PYEMBED' in self.uselib: self.uselib.append('PYEMBED') @extension(EXT_PY) def process_py(self, node): if not (self.bld.is_install and self.install_path): return def inst_py(ctx): install_pyfile(self, node) self.bld.add_post_fun(inst_py) def install_pyfile(self, node): path = self.bld.get_install_path(self.install_path + os.sep + node.name, self.env) self.bld.install_files(self.install_path, [node], self.env, self.chmod, postpone=False) if self.bld.is_install < 0: info("* removing byte compiled python files") for x in 'co': try: os.remove(path + x) except OSError: pass if self.bld.is_install > 0: if self.env['PYC'] or self.env['PYO']: info("* byte compiling %r" % path) if self.env['PYC']: program = (""" import sys, py_compile for pyfile in sys.argv[1:]: py_compile.compile(pyfile, pyfile + 'c') """) argv = [self.env['PYTHON'], '-c', program, path] ret = Utils.pproc.Popen(argv).wait() if ret: raise Utils.WafError('bytecode compilation failed %r' % path) if self.env['PYO']: program = (""" import sys, py_compile for pyfile in sys.argv[1:]: py_compile.compile(pyfile, pyfile + 'o') """) argv = [self.env['PYTHON'], self.env['PYFLAGS_OPT'], '-c', program, path] ret = Utils.pproc.Popen(argv).wait() if ret: raise Utils.WafError('bytecode compilation failed %r' % path) # COMPAT class py_taskgen(TaskGen.task_gen): def __init__(self, *k, **kw): TaskGen.task_gen.__init__(self, *k, **kw) @before('apply_core') @after('vars_target_cprogram', 'vars_target_cshlib') @feature('py') def init_py(self): self.default_install_path = '${PYTHONDIR}' def _get_python_variables(python_exe, variables, 
imports=['import sys']): """Run a python interpreter and print some variables""" program = list(imports) program.append('') for v in variables: program.append("print(repr(%s))" % v) os_env = dict(os.environ) try: del os_env['MACOSX_DEPLOYMENT_TARGET'] # see comments in the OSX tool except KeyError: pass proc = Utils.pproc.Popen([python_exe, "-c", '\n'.join(program)], stdout=Utils.pproc.PIPE, env=os_env) output = proc.communicate()[0].split("\n") # do not touch, python3 if proc.returncode: if Options.options.verbose: warn("Python program to extract python configuration variables failed:\n%s" % '\n'.join(["line %03i: %s" % (lineno+1, line) for lineno, line in enumerate(program)])) raise RuntimeError return_values = [] for s in output: s = s.strip() if not s: continue if s == 'None': return_values.append(None) elif (s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"'): return_values.append(eval(s)) elif s[0].isdigit(): return_values.append(int(s)) else: break return return_values @conf def check_python_headers(conf, mandatory=True): """Check for headers and libraries necessary to extend or embed python. 
On success the environment variables xxx_PYEXT and xxx_PYEMBED are added for uselib PYEXT: for compiling python extensions PYEMBED: for embedding a python interpreter""" if not conf.env['CC_NAME'] and not conf.env['CXX_NAME']: conf.fatal('load a compiler first (gcc, g++, ..)') if not conf.env['PYTHON_VERSION']: conf.check_python_version() env = conf.env python = env['PYTHON'] if not python: conf.fatal('could not find the python executable') ## On Mac OSX we need to use mac bundles for python plugins if Options.platform == 'darwin': conf.check_tool('osx') try: # Get some python configuration variables using distutils v = 'prefix SO SYSLIBS LDFLAGS SHLIBS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET'.split() (python_prefix, python_SO, python_SYSLIBS, python_LDFLAGS, python_SHLIBS, python_LIBDIR, python_LIBPL, INCLUDEPY, Py_ENABLE_SHARED, python_MACOSX_DEPLOYMENT_TARGET) = \ _get_python_variables(python, ["get_config_var('%s') or ''" % x for x in v], ['from distutils.sysconfig import get_config_var']) except RuntimeError: conf.fatal("Python development headers not found (-v for details).") conf.log.write("""Configuration returned from %r: python_prefix = %r python_SO = %r python_SYSLIBS = %r python_LDFLAGS = %r python_SHLIBS = %r python_LIBDIR = %r python_LIBPL = %r INCLUDEPY = %r Py_ENABLE_SHARED = %r MACOSX_DEPLOYMENT_TARGET = %r """ % (python, python_prefix, python_SO, python_SYSLIBS, python_LDFLAGS, python_SHLIBS, python_LIBDIR, python_LIBPL, INCLUDEPY, Py_ENABLE_SHARED, python_MACOSX_DEPLOYMENT_TARGET)) if python_MACOSX_DEPLOYMENT_TARGET: conf.env['MACOSX_DEPLOYMENT_TARGET'] = python_MACOSX_DEPLOYMENT_TARGET conf.environ['MACOSX_DEPLOYMENT_TARGET'] = python_MACOSX_DEPLOYMENT_TARGET env['pyext_PATTERN'] = '%s'+python_SO # Check for python libraries for embedding if python_SYSLIBS is not None: for lib in python_SYSLIBS.split(): if lib.startswith('-l'): lib = lib[2:] # strip '-l' env.append_value('LIB_PYEMBED', lib) if python_SHLIBS is not None: 
for lib in python_SHLIBS.split(): if lib.startswith('-l'): env.append_value('LIB_PYEMBED', lib[2:]) # strip '-l' else: env.append_value('LINKFLAGS_PYEMBED', lib) if Options.platform != 'darwin' and python_LDFLAGS: parse_flags(python_LDFLAGS, 'PYEMBED', env) result = False name = 'python' + env['PYTHON_VERSION'] if python_LIBDIR is not None: path = [python_LIBDIR] conf.log.write("\n\n# Trying LIBDIR: %r\n" % path) result = conf.check(lib=name, uselib='PYEMBED', libpath=path) if not result and python_LIBPL is not None: conf.log.write("\n\n# try again with -L$python_LIBPL (some systems don't install the python library in $prefix/lib)\n") path = [python_LIBPL] result = conf.check(lib=name, uselib='PYEMBED', libpath=path) if not result: conf.log.write("\n\n# try again with -L$prefix/libs, and pythonXY name rather than pythonX.Y (win32)\n") path = [os.path.join(python_prefix, "libs")] name = 'python' + env['PYTHON_VERSION'].replace('.', '') result = conf.check(lib=name, uselib='PYEMBED', libpath=path) if result: env['LIBPATH_PYEMBED'] = path env.append_value('LIB_PYEMBED', name) else: conf.log.write("\n\n### LIB NOT FOUND\n") # under certain conditions, python extensions must link to # python libraries, not just python embedding programs. if (sys.platform == 'win32' or sys.platform.startswith('os2') or sys.platform == 'darwin' or Py_ENABLE_SHARED): env['LIBPATH_PYEXT'] = env['LIBPATH_PYEMBED'] env['LIB_PYEXT'] = env['LIB_PYEMBED'] # We check that pythonX.Y-config exists, and if it exists we # use it to get only the includes, else fall back to distutils. 
python_config = conf.find_program( 'python%s-config' % ('.'.join(env['PYTHON_VERSION'].split('.')[:2])), var='PYTHON_CONFIG') if not python_config: python_config = conf.find_program( 'python-config-%s' % ('.'.join(env['PYTHON_VERSION'].split('.')[:2])), var='PYTHON_CONFIG') includes = [] if python_config: for incstr in Utils.cmd_output("%s --includes" % (python_config,)).strip().split(): # strip the -I or /I if (incstr.startswith('-I') or incstr.startswith('/I')): incstr = incstr[2:] # append include path, unless already given if incstr not in includes: includes.append(incstr) conf.log.write("Include path for Python extensions " "(found via python-config --includes): %r\n" % (includes,)) env['CPPPATH_PYEXT'] = includes env['CPPPATH_PYEMBED'] = includes else: conf.log.write("Include path for Python extensions " "(found via distutils module): %r\n" % (INCLUDEPY,)) env['CPPPATH_PYEXT'] = [INCLUDEPY] env['CPPPATH_PYEMBED'] = [INCLUDEPY] # Code using the Python API needs to be compiled with -fno-strict-aliasing if env['CC_NAME'] == 'gcc': env.append_value('CCFLAGS_PYEMBED', '-fno-strict-aliasing') env.append_value('CCFLAGS_PYEXT', '-fno-strict-aliasing') if env['CXX_NAME'] == 'gcc': env.append_value('CXXFLAGS_PYEMBED', '-fno-strict-aliasing') env.append_value('CXXFLAGS_PYEXT', '-fno-strict-aliasing') # See if it compiles conf.check(define_name='HAVE_PYTHON_H', uselib='PYEMBED', fragment=FRAG_2, errmsg='Could not find the python development headers', mandatory=mandatory) @conf def check_python_version(conf, minver=None): """ Check if the python interpreter is found matching a given minimum version. minver should be a tuple, eg. to check for python >= 2.4.2 pass (2,4,2) as minver. If successful, PYTHON_VERSION is defined as 'MAJOR.MINOR' (eg. '2.4') of the actual python version found, and PYTHONDIR is defined, pointing to the site-packages directory appropriate for this python version, where modules/packages/extensions should be installed. 
""" assert minver is None or isinstance(minver, tuple) python = conf.env['PYTHON'] if not python: conf.fatal('could not find the python executable') # Get python version string cmd = [python, "-c", "import sys\nfor x in sys.version_info: print(str(x))"] debug('python: Running python command %r' % cmd) proc = Utils.pproc.Popen(cmd, stdout=Utils.pproc.PIPE, shell=False) lines = proc.communicate()[0].split() assert len(lines) == 5, "found %i lines, expected 5: %r" % (len(lines), lines) pyver_tuple = (int(lines[0]), int(lines[1]), int(lines[2]), lines[3], int(lines[4])) # compare python version with the minimum required result = (minver is None) or (pyver_tuple >= minver) if result: # define useful environment variables pyver = '.'.join([str(x) for x in pyver_tuple[:2]]) conf.env['PYTHON_VERSION'] = pyver if 'PYTHONDIR' in conf.environ: pydir = conf.environ['PYTHONDIR'] else: if sys.platform == 'win32': (python_LIBDEST, pydir) = \ _get_python_variables(python, ["get_config_var('LIBDEST') or ''", "get_python_lib(standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']], ['from distutils.sysconfig import get_config_var, get_python_lib']) else: python_LIBDEST = None (pydir,) = \ _get_python_variables(python, ["get_python_lib(standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']], ['from distutils.sysconfig import get_config_var, get_python_lib']) if python_LIBDEST is None: if conf.env['LIBDIR']: python_LIBDEST = os.path.join(conf.env['LIBDIR'], "python" + pyver) else: python_LIBDEST = os.path.join(conf.env['PREFIX'], "lib", "python" + pyver) if 'PYTHONARCHDIR' in conf.environ: pyarchdir = conf.environ['PYTHONARCHDIR'] else: (pyarchdir,) = _get_python_variables(python, ["get_python_lib(plat_specific=1, standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']], ['from distutils.sysconfig import get_config_var, get_python_lib']) if not pyarchdir: pyarchdir = pydir if hasattr(conf, 'define'): # conf.define is added by the C tool, so may not exist conf.define('PYTHONDIR', pydir) 
conf.define('PYTHONARCHDIR', pyarchdir) conf.env['PYTHONDIR'] = pydir # Feedback pyver_full = '.'.join(map(str, pyver_tuple[:3])) if minver is None: conf.check_message_custom('Python version', '', pyver_full) else: minver_str = '.'.join(map(str, minver)) conf.check_message('Python version', ">= %s" % minver_str, result, option=pyver_full) if not result: conf.fatal('The python version is too old (%r)' % pyver_full) @conf def check_python_module(conf, module_name): """ Check if the selected python interpreter can import the given python module. """ result = not Utils.pproc.Popen([conf.env['PYTHON'], "-c", "import %s" % module_name], stderr=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE).wait() conf.check_message('Python module', module_name, result) if not result: conf.fatal('Could not find the python module %r' % module_name) def detect(conf): if not conf.env.PYTHON: conf.env.PYTHON = sys.executable python = conf.find_program('python', var='PYTHON') if not python: conf.fatal('Could not find the path of the python executable') if conf.env.PYTHON != sys.executable: warn("python executable '%s' different from sys.executable '%s'" % (conf.env.PYTHON, sys.executable)) v = conf.env v['PYCMD'] = '"import sys, py_compile;py_compile.compile(sys.argv[1], sys.argv[2])"' v['PYFLAGS'] = '' v['PYFLAGS_OPT'] = '-O' v['PYC'] = getattr(Options.options, 'pyc', 1) v['PYO'] = getattr(Options.options, 'pyo', 1) def set_options(opt): opt.add_option('--nopyc', action='store_false', default=1, help = 'Do not install bytecode compiled .pyc files (configuration) [Default:install]', dest = 'pyc') opt.add_option('--nopyo', action='store_false', default=1, help='Do not install optimised compiled .pyo files (configuration) [Default:install]', dest='pyo') ntdb-1.0/buildtools/wafadmin/Tools/qt4.py000066400000000000000000000345201224151530700204350ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006 (ita) """ Qt4 support If QT4_ROOT is given (absolute path), the 
configuration will look in it first This module also demonstrates how to add tasks dynamically (when the build has started) """ try: from xml.sax import make_parser from xml.sax.handler import ContentHandler except ImportError: has_xml = False ContentHandler = object else: has_xml = True import os, sys import ccroot, cxx import TaskGen, Task, Utils, Runner, Options, Node, Configure from TaskGen import taskgen, feature, after, extension from Logs import error from Constants import * MOC_H = ['.h', '.hpp', '.hxx', '.hh'] EXT_RCC = ['.qrc'] EXT_UI = ['.ui'] EXT_QT4 = ['.cpp', '.cc', '.cxx', '.C'] class qxx_task(Task.Task): "A cpp task that may create a moc task dynamically" before = ['cxx_link', 'static_link'] def __init__(self, *k, **kw): Task.Task.__init__(self, *k, **kw) self.moc_done = 0 def scan(self): (nodes, names) = ccroot.scan(self) # for some reasons (variants) the moc node may end in the list of node deps for x in nodes: if x.name.endswith('.moc'): nodes.remove(x) names.append(x.relpath_gen(self.inputs[0].parent)) return (nodes, names) def runnable_status(self): if self.moc_done: # if there is a moc task, delay the computation of the file signature for t in self.run_after: if not t.hasrun: return ASK_LATER # the moc file enters in the dependency calculation # so we need to recompute the signature when the moc file is present self.signature() return Task.Task.runnable_status(self) else: # yes, really, there are people who generate cxx files for t in self.run_after: if not t.hasrun: return ASK_LATER self.add_moc_tasks() return ASK_LATER def add_moc_tasks(self): node = self.inputs[0] tree = node.__class__.bld try: # compute the signature once to know if there is a moc file to create self.signature() except KeyError: # the moc file may be referenced somewhere else pass else: # remove the signature, it must be recomputed with the moc task delattr(self, 'cache_sig') moctasks=[] mocfiles=[] variant = node.variant(self.env) try: tmp_lst = 
tree.raw_deps[self.unique_id()] tree.raw_deps[self.unique_id()] = [] except KeyError: tmp_lst = [] for d in tmp_lst: if not d.endswith('.moc'): continue # paranoid check if d in mocfiles: error("paranoia owns") continue # process that base.moc only once mocfiles.append(d) # find the extension (performed only when the .cpp has changes) base2 = d[:-4] for path in [node.parent] + self.generator.env['INC_PATHS']: tree.rescan(path) vals = getattr(Options.options, 'qt_header_ext', '') or MOC_H for ex in vals: h_node = path.find_resource(base2 + ex) if h_node: break else: continue break else: raise Utils.WafError("no header found for %s which is a moc file" % str(d)) m_node = h_node.change_ext('.moc') tree.node_deps[(self.inputs[0].parent.id, self.env.variant(), m_node.name)] = h_node # create the task task = Task.TaskBase.classes['moc'](self.env, normal=0) task.set_inputs(h_node) task.set_outputs(m_node) generator = tree.generator generator.outstanding.insert(0, task) generator.total += 1 moctasks.append(task) # remove raw deps except the moc files to save space (optimization) tmp_lst = tree.raw_deps[self.unique_id()] = mocfiles # look at the file inputs, it is set right above lst = tree.node_deps.get(self.unique_id(), ()) for d in lst: name = d.name if name.endswith('.moc'): task = Task.TaskBase.classes['moc'](self.env, normal=0) task.set_inputs(tree.node_deps[(self.inputs[0].parent.id, self.env.variant(), name)]) # 1st element in a tuple task.set_outputs(d) generator = tree.generator generator.outstanding.insert(0, task) generator.total += 1 moctasks.append(task) # simple scheduler dependency: run the moc task before others self.run_after = moctasks self.moc_done = 1 run = Task.TaskBase.classes['cxx'].__dict__['run'] def translation_update(task): outs = [a.abspath(task.env) for a in task.outputs] outs = " ".join(outs) lupdate = task.env['QT_LUPDATE'] for x in task.inputs: file = x.abspath(task.env) cmd = "%s %s -ts %s" % (lupdate, file, outs) Utils.pprint('BLUE', cmd) 
task.generator.bld.exec_command(cmd) class XMLHandler(ContentHandler): def __init__(self): self.buf = [] self.files = [] def startElement(self, name, attrs): if name == 'file': self.buf = [] def endElement(self, name): if name == 'file': self.files.append(''.join(self.buf)) def characters(self, cars): self.buf.append(cars) def scan(self): "add the dependency on the files referenced in the qrc" node = self.inputs[0] parser = make_parser() curHandler = XMLHandler() parser.setContentHandler(curHandler) fi = open(self.inputs[0].abspath(self.env)) parser.parse(fi) fi.close() nodes = [] names = [] root = self.inputs[0].parent for x in curHandler.files: nd = root.find_resource(x) if nd: nodes.append(nd) else: names.append(x) return (nodes, names) @extension(EXT_RCC) def create_rcc_task(self, node): "hook for rcc files" rcnode = node.change_ext('_rc.cpp') rcctask = self.create_task('rcc', node, rcnode) cpptask = self.create_task('cxx', rcnode, rcnode.change_ext('.o')) self.compiled_tasks.append(cpptask) return cpptask @extension(EXT_UI) def create_uic_task(self, node): "hook for uic tasks" uictask = self.create_task('ui4', node) uictask.outputs = [self.path.find_or_declare(self.env['ui_PATTERN'] % node.name[:-3])] return uictask class qt4_taskgen(cxx.cxx_taskgen): def __init__(self, *k, **kw): cxx.cxx_taskgen.__init__(self, *k, **kw) self.features.append('qt4') @extension('.ts') def add_lang(self, node): """add all the .ts file into self.lang""" self.lang = self.to_list(getattr(self, 'lang', [])) + [node] @feature('qt4') @after('apply_link') def apply_qt4(self): if getattr(self, 'lang', None): update = getattr(self, 'update', None) lst=[] trans=[] for l in self.to_list(self.lang): if not isinstance(l, Node.Node): l = self.path.find_resource(l+'.ts') t = self.create_task('ts2qm', l, l.change_ext('.qm')) lst.append(t.outputs[0]) if update: trans.append(t.inputs[0]) trans_qt4 = getattr(Options.options, 'trans_qt4', False) if update and trans_qt4: # we need the cpp files 
given, except the rcc task we create after # FIXME may be broken u = Task.TaskCmd(translation_update, self.env, 2) u.inputs = [a.inputs[0] for a in self.compiled_tasks] u.outputs = trans if getattr(self, 'langname', None): t = Task.TaskBase.classes['qm2rcc'](self.env) t.set_inputs(lst) t.set_outputs(self.path.find_or_declare(self.langname+'.qrc')) t.path = self.path k = create_rcc_task(self, t.outputs[0]) self.link_task.inputs.append(k.outputs[0]) self.env.append_value('MOC_FLAGS', self.env._CXXDEFFLAGS) self.env.append_value('MOC_FLAGS', self.env._CXXINCFLAGS) @extension(EXT_QT4) def cxx_hook(self, node): # create the compilation task: cpp or cc try: obj_ext = self.obj_ext except AttributeError: obj_ext = '_%d.o' % self.idx task = self.create_task('qxx', node, node.change_ext(obj_ext)) self.compiled_tasks.append(task) return task def process_qm2rcc(task): outfile = task.outputs[0].abspath(task.env) f = open(outfile, 'w') f.write('\n\n') for k in task.inputs: f.write(' ') #f.write(k.name) f.write(k.path_to_parent(task.path)) f.write('\n') f.write('\n') f.close() b = Task.simple_task_type b('moc', '${QT_MOC} ${MOC_FLAGS} ${SRC} ${MOC_ST} ${TGT}', color='BLUE', vars=['QT_MOC', 'MOC_FLAGS'], shell=False) cls = b('rcc', '${QT_RCC} -name ${SRC[0].name} ${SRC[0].abspath(env)} ${RCC_ST} -o ${TGT}', color='BLUE', before='cxx moc qxx_task', after="qm2rcc", shell=False) cls.scan = scan b('ui4', '${QT_UIC} ${SRC} -o ${TGT}', color='BLUE', before='cxx moc qxx_task', shell=False) b('ts2qm', '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}', color='BLUE', before='qm2rcc', shell=False) Task.task_type_from_func('qm2rcc', vars=[], func=process_qm2rcc, color='BLUE', before='rcc', after='ts2qm') def detect_qt4(conf): env = conf.env opt = Options.options qtdir = getattr(opt, 'qtdir', '') qtbin = getattr(opt, 'qtbin', '') qtlibs = getattr(opt, 'qtlibs', '') useframework = getattr(opt, 'use_qt4_osxframework', True) paths = [] # the path to qmake has been given explicitely if 
qtbin: paths = [qtbin] # the qt directory has been given - we deduce the qt binary path if not qtdir: qtdir = conf.environ.get('QT4_ROOT', '') qtbin = os.path.join(qtdir, 'bin') paths = [qtbin] # no qtdir, look in the path and in /usr/local/Trolltech if not qtdir: paths = os.environ.get('PATH', '').split(os.pathsep) paths.append('/usr/share/qt4/bin/') try: lst = os.listdir('/usr/local/Trolltech/') except OSError: pass else: if lst: lst.sort() lst.reverse() # keep the highest version qtdir = '/usr/local/Trolltech/%s/' % lst[0] qtbin = os.path.join(qtdir, 'bin') paths.append(qtbin) # at the end, try to find qmake in the paths given # keep the one with the highest version cand = None prev_ver = ['4', '0', '0'] for qmk in ['qmake-qt4', 'qmake4', 'qmake']: qmake = conf.find_program(qmk, path_list=paths) if qmake: try: version = Utils.cmd_output([qmake, '-query', 'QT_VERSION']).strip() except ValueError: pass else: if version: new_ver = version.split('.') if new_ver > prev_ver: cand = qmake prev_ver = new_ver if cand: qmake = cand else: conf.fatal('could not find qmake for qt4') conf.env.QMAKE = qmake qtincludes = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_HEADERS']).strip() qtdir = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_PREFIX']).strip() + os.sep qtbin = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_BINS']).strip() + os.sep if not qtlibs: try: qtlibs = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_LIBS']).strip() + os.sep except ValueError: qtlibs = os.path.join(qtdir, 'lib') def find_bin(lst, var): for f in lst: ret = conf.find_program(f, path_list=paths) if ret: env[var]=ret break vars = "QtCore QtGui QtUiTools QtNetwork QtOpenGL QtSql QtSvg QtTest QtXml QtWebKit Qt3Support".split() find_bin(['uic-qt3', 'uic3'], 'QT_UIC3') find_bin(['uic-qt4', 'uic'], 'QT_UIC') if not env['QT_UIC']: conf.fatal('cannot find the uic compiler for qt4') try: version = Utils.cmd_output(env['QT_UIC'] + " -version 2>&1").strip() except ValueError: conf.fatal('your uic compiler 
is for qt3, add uic for qt4 to your path') version = version.replace('Qt User Interface Compiler ','') version = version.replace('User Interface Compiler for Qt', '') if version.find(" 3.") != -1: conf.check_message('uic version', '(too old)', 0, option='(%s)'%version) sys.exit(1) conf.check_message('uic version', '', 1, option='(%s)'%version) find_bin(['moc-qt4', 'moc'], 'QT_MOC') find_bin(['rcc'], 'QT_RCC') find_bin(['lrelease-qt4', 'lrelease'], 'QT_LRELEASE') find_bin(['lupdate-qt4', 'lupdate'], 'QT_LUPDATE') env['UIC3_ST']= '%s -o %s' env['UIC_ST'] = '%s -o %s' env['MOC_ST'] = '-o' env['ui_PATTERN'] = 'ui_%s.h' env['QT_LRELEASE_FLAGS'] = ['-silent'] vars_debug = [a+'_debug' for a in vars] try: conf.find_program('pkg-config', var='pkgconfig', path_list=paths, mandatory=True) except Configure.ConfigurationError: for lib in vars_debug+vars: uselib = lib.upper() d = (lib.find('_debug') > 0) and 'd' or '' # original author seems to prefer static to shared libraries for (pat, kind) in ((conf.env.staticlib_PATTERN, 'STATIC'), (conf.env.shlib_PATTERN, '')): conf.check_message_1('Checking for %s %s' % (lib, kind)) for ext in ['', '4']: path = os.path.join(qtlibs, pat % (lib + d + ext)) if os.path.exists(path): env.append_unique(kind + 'LIB_' + uselib, lib + d + ext) conf.check_message_2('ok ' + path, 'GREEN') break path = os.path.join(qtbin, pat % (lib + d + ext)) if os.path.exists(path): env.append_unique(kind + 'LIB_' + uselib, lib + d + ext) conf.check_message_2('ok ' + path, 'GREEN') break else: conf.check_message_2('not found', 'YELLOW') continue break env.append_unique('LIBPATH_' + uselib, qtlibs) env.append_unique('CPPPATH_' + uselib, qtincludes) env.append_unique('CPPPATH_' + uselib, qtincludes + os.sep + lib) else: for i in vars_debug+vars: try: conf.check_cfg(package=i, args='--cflags --libs --silence-errors', path=conf.env.pkgconfig) except ValueError: pass # the libpaths are set nicely, unfortunately they make really long command-lines # remove the qtcore 
ones from qtgui, etc def process_lib(vars_, coreval): for d in vars_: var = d.upper() if var == 'QTCORE': continue value = env['LIBPATH_'+var] if value: core = env[coreval] accu = [] for lib in value: if lib in core: continue accu.append(lib) env['LIBPATH_'+var] = accu process_lib(vars, 'LIBPATH_QTCORE') process_lib(vars_debug, 'LIBPATH_QTCORE_DEBUG') # rpath if wanted want_rpath = getattr(Options.options, 'want_rpath', 1) if want_rpath: def process_rpath(vars_, coreval): for d in vars_: var = d.upper() value = env['LIBPATH_'+var] if value: core = env[coreval] accu = [] for lib in value: if var != 'QTCORE': if lib in core: continue accu.append('-Wl,--rpath='+lib) env['RPATH_'+var] = accu process_rpath(vars, 'LIBPATH_QTCORE') process_rpath(vars_debug, 'LIBPATH_QTCORE_DEBUG') env['QTLOCALE'] = str(env['PREFIX'])+'/share/locale' def detect(conf): detect_qt4(conf) def set_options(opt): opt.add_option('--want-rpath', type='int', default=1, dest='want_rpath', help='set rpath to 1 or 0 [Default 1]') opt.add_option('--header-ext', type='string', default='', help='header extension for moc files', dest='qt_header_ext') for i in 'qtdir qtbin qtlibs'.split(): opt.add_option('--'+i, type='string', default='', dest=i) if sys.platform == "darwin": opt.add_option('--no-qt4-framework', action="store_false", help='do not use the framework version of Qt4 in OS X', dest='use_qt4_osxframework',default=True) opt.add_option('--translate', action="store_true", help="collect translation strings", dest="trans_qt4", default=False) ntdb-1.0/buildtools/wafadmin/Tools/ruby.py000066400000000000000000000072771224151530700207170ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # daniel.svensson at purplescout.se 2008 import os import Task, Options, Utils from TaskGen import before, feature, after from Configure import conf @feature('rubyext') @before('apply_incpaths', 'apply_type_vars', 'apply_lib_vars', 'apply_bundle') @after('default_cc', 'vars_target_cshlib') def 
init_rubyext(self): self.default_install_path = '${ARCHDIR_RUBY}' self.uselib = self.to_list(getattr(self, 'uselib', '')) if not 'RUBY' in self.uselib: self.uselib.append('RUBY') if not 'RUBYEXT' in self.uselib: self.uselib.append('RUBYEXT') @feature('rubyext') @before('apply_link') def apply_ruby_so_name(self): self.env['shlib_PATTERN'] = self.env['rubyext_PATTERN'] @conf def check_ruby_version(conf, minver=()): """ Checks if ruby is installed. If installed the variable RUBY will be set in environment. Ruby binary can be overridden by --with-ruby-binary config variable """ if Options.options.rubybinary: conf.env.RUBY = Options.options.rubybinary else: conf.find_program("ruby", var="RUBY", mandatory=True) ruby = conf.env.RUBY try: version = Utils.cmd_output([ruby, '-e', 'puts defined?(VERSION) ? VERSION : RUBY_VERSION']).strip() except: conf.fatal('could not determine ruby version') conf.env.RUBY_VERSION = version try: ver = tuple(map(int, version.split("."))) except: conf.fatal('unsupported ruby version %r' % version) cver = '' if minver: if ver < minver: conf.fatal('ruby is too old') cver = ".".join([str(x) for x in minver]) conf.check_message('ruby', cver, True, version) @conf def check_ruby_ext_devel(conf): if not conf.env.RUBY: conf.fatal('ruby detection is required first') if not conf.env.CC_NAME and not conf.env.CXX_NAME: conf.fatal('load a c/c++ compiler first') version = tuple(map(int, conf.env.RUBY_VERSION.split("."))) def read_out(cmd): return Utils.to_list(Utils.cmd_output([conf.env.RUBY, '-rrbconfig', '-e', cmd])) def read_config(key): return read_out('puts Config::CONFIG[%r]' % key) ruby = conf.env['RUBY'] archdir = read_config('archdir') cpppath = archdir if version >= (1, 9, 0): ruby_hdrdir = read_config('rubyhdrdir') cpppath += ruby_hdrdir cpppath += [os.path.join(ruby_hdrdir[0], read_config('arch')[0])] conf.check(header_name='ruby.h', includes=cpppath, mandatory=True, errmsg='could not find ruby header file') conf.env.LIBPATH_RUBYEXT = 
read_config('libdir') conf.env.LIBPATH_RUBYEXT += archdir conf.env.CPPPATH_RUBYEXT = cpppath conf.env.CCFLAGS_RUBYEXT = read_config("CCDLFLAGS") conf.env.rubyext_PATTERN = '%s.' + read_config('DLEXT')[0] # ok this is really stupid, but the command and flags are combined. # so we try to find the first argument... flags = read_config('LDSHARED') while flags and flags[0][0] != '-': flags = flags[1:] # we also want to strip out the deprecated ppc flags if len(flags) > 1 and flags[1] == "ppc": flags = flags[2:] conf.env.LINKFLAGS_RUBYEXT = flags conf.env.LINKFLAGS_RUBYEXT += read_config("LIBS") conf.env.LINKFLAGS_RUBYEXT += read_config("LIBRUBYARG_SHARED") if Options.options.rubyarchdir: conf.env.ARCHDIR_RUBY = Options.options.rubyarchdir else: conf.env.ARCHDIR_RUBY = read_config('sitearchdir')[0] if Options.options.rubylibdir: conf.env.LIBDIR_RUBY = Options.options.rubylibdir else: conf.env.LIBDIR_RUBY = read_config('sitelibdir')[0] def set_options(opt): opt.add_option('--with-ruby-archdir', type='string', dest='rubyarchdir', help='Specify directory where to install arch specific files') opt.add_option('--with-ruby-libdir', type='string', dest='rubylibdir', help='Specify alternate ruby library path') opt.add_option('--with-ruby-binary', type='string', dest='rubybinary', help='Specify alternate ruby binary') ntdb-1.0/buildtools/wafadmin/Tools/suncc.py000066400000000000000000000035221224151530700210360ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006 (ita) # Ralf Habacker, 2006 (rh) import os, optparse import Utils, Options, Configure import ccroot, ar from Configure import conftest @conftest def find_scc(conf): v = conf.env cc = None if v['CC']: cc = v['CC'] elif 'CC' in conf.environ: cc = conf.environ['CC'] #if not cc: cc = conf.find_program('gcc', var='CC') if not cc: cc = conf.find_program('cc', var='CC') if not cc: conf.fatal('suncc was not found') cc = conf.cmd_to_list(cc) try: if not Utils.cmd_output(cc + ['-flags']): 
conf.fatal('suncc %r was not found' % cc) except ValueError: conf.fatal('suncc -flags could not be executed') v['CC'] = cc v['CC_NAME'] = 'sun' @conftest def scc_common_flags(conf): v = conf.env # CPPFLAGS CCDEFINES _CCINCFLAGS _CCDEFFLAGS v['CC_SRC_F'] = '' v['CC_TGT_F'] = ['-c', '-o', ''] v['CPPPATH_ST'] = '-I%s' # template for adding include paths # linker if not v['LINK_CC']: v['LINK_CC'] = v['CC'] v['CCLNK_SRC_F'] = '' v['CCLNK_TGT_F'] = ['-o', ''] # solaris hack, separate the -o from the target v['LIB_ST'] = '-l%s' # template for adding libs v['LIBPATH_ST'] = '-L%s' # template for adding libpaths v['STATICLIB_ST'] = '-l%s' v['STATICLIBPATH_ST'] = '-L%s' v['CCDEFINES_ST'] = '-D%s' v['SONAME_ST'] = '-Wl,-h -Wl,%s' v['SHLIB_MARKER'] = '-Bdynamic' v['STATICLIB_MARKER'] = '-Bstatic' # program v['program_PATTERN'] = '%s' # shared library v['shlib_CCFLAGS'] = ['-Kpic', '-DPIC'] v['shlib_LINKFLAGS'] = ['-G'] v['shlib_PATTERN'] = 'lib%s.so' # static lib v['staticlib_LINKFLAGS'] = ['-Bstatic'] v['staticlib_PATTERN'] = 'lib%s.a' detect = ''' find_scc find_cpp find_ar scc_common_flags cc_load_tools cc_add_flags link_add_flags ''' ntdb-1.0/buildtools/wafadmin/Tools/suncxx.py000066400000000000000000000034601224151530700212540ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006 (ita) # Ralf Habacker, 2006 (rh) import os, optparse import Utils, Options, Configure import ccroot, ar from Configure import conftest @conftest def find_sxx(conf): v = conf.env cc = None if v['CXX']: cc = v['CXX'] elif 'CXX' in conf.environ: cc = conf.environ['CXX'] if not cc: cc = conf.find_program('c++', var='CXX') if not cc: conf.fatal('sunc++ was not found') cc = conf.cmd_to_list(cc) try: if not Utils.cmd_output(cc + ['-flags']): conf.fatal('sunc++ %r was not found' % cc) except ValueError: conf.fatal('sunc++ -flags could not be executed') v['CXX'] = cc v['CXX_NAME'] = 'sun' @conftest def sxx_common_flags(conf): v = conf.env # CPPFLAGS CXXDEFINES _CXXINCFLAGS 
_CXXDEFFLAGS v['CXX_SRC_F'] = '' v['CXX_TGT_F'] = ['-c', '-o', ''] v['CPPPATH_ST'] = '-I%s' # template for adding include paths # linker if not v['LINK_CXX']: v['LINK_CXX'] = v['CXX'] v['CXXLNK_SRC_F'] = '' v['CXXLNK_TGT_F'] = ['-o', ''] # solaris hack, separate the -o from the target v['LIB_ST'] = '-l%s' # template for adding libs v['LIBPATH_ST'] = '-L%s' # template for adding libpaths v['STATICLIB_ST'] = '-l%s' v['STATICLIBPATH_ST'] = '-L%s' v['CXXDEFINES_ST'] = '-D%s' v['SONAME_ST'] = '-Wl,-h -Wl,%s' v['SHLIB_MARKER'] = '-Bdynamic' v['STATICLIB_MARKER'] = '-Bstatic' # program v['program_PATTERN'] = '%s' # shared library v['shlib_CXXFLAGS'] = ['-Kpic', '-DPIC'] v['shlib_LINKFLAGS'] = ['-G'] v['shlib_PATTERN'] = 'lib%s.so' # static lib v['staticlib_LINKFLAGS'] = ['-Bstatic'] v['staticlib_PATTERN'] = 'lib%s.a' detect = ''' find_sxx find_cpp find_ar sxx_common_flags cxx_load_tools cxx_add_flags link_add_flags ''' ntdb-1.0/buildtools/wafadmin/Tools/tex.py000066400000000000000000000161021224151530700205210ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006 (ita) "TeX/LaTeX/PDFLaTeX support" import os, re import Utils, TaskGen, Task, Runner, Build from TaskGen import feature, before from Logs import error, warn, debug re_tex = re.compile(r'\\(?Pinclude|input|import|bringin|lstinputlisting){(?P[^{}]*)}', re.M) def scan(self): node = self.inputs[0] env = self.env nodes = [] names = [] if not node: return (nodes, names) code = Utils.readf(node.abspath(env)) curdirnode = self.curdirnode abs = curdirnode.abspath() for match in re_tex.finditer(code): path = match.group('file') if path: for k in ['', '.tex', '.ltx']: # add another loop for the tex include paths? 
debug('tex: trying %s%s' % (path, k)) try: os.stat(abs+os.sep+path+k) except OSError: continue found = path+k node = curdirnode.find_resource(found) if node: nodes.append(node) else: debug('tex: could not find %s' % path) names.append(path) debug("tex: found the following : %s and names %s" % (nodes, names)) return (nodes, names) latex_fun, _ = Task.compile_fun('latex', '${LATEX} ${LATEXFLAGS} ${SRCFILE}', shell=False) pdflatex_fun, _ = Task.compile_fun('pdflatex', '${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}', shell=False) bibtex_fun, _ = Task.compile_fun('bibtex', '${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}', shell=False) makeindex_fun, _ = Task.compile_fun('bibtex', '${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}', shell=False) g_bibtex_re = re.compile('bibdata', re.M) def tex_build(task, command='LATEX'): env = task.env bld = task.generator.bld if not env['PROMPT_LATEX']: env.append_value('LATEXFLAGS', '-interaction=batchmode') env.append_value('PDFLATEXFLAGS', '-interaction=batchmode') fun = latex_fun if command == 'PDFLATEX': fun = pdflatex_fun node = task.inputs[0] reldir = node.bld_dir(env) #lst = [] #for c in Utils.split_path(reldir): # if c: lst.append('..') #srcfile = os.path.join(*(lst + [node.srcpath(env)])) #sr2 = os.path.join(*(lst + [node.parent.srcpath(env)])) srcfile = node.abspath(env) sr2 = node.parent.abspath() + os.pathsep + node.parent.abspath(env) + os.pathsep aux_node = node.change_ext('.aux') idx_node = node.change_ext('.idx') nm = aux_node.name docuname = nm[ : len(nm) - 4 ] # 4 is the size of ".aux" # important, set the cwd for everybody task.cwd = task.inputs[0].parent.abspath(task.env) warn('first pass on %s' % command) task.env.env = {'TEXINPUTS': sr2} task.env.SRCFILE = srcfile ret = fun(task) if ret: return ret # look in the .aux file if there is a bibfile to process try: ct = Utils.readf(aux_node.abspath(env)) except (OSError, IOError): error('error bibtex scan') else: fo = g_bibtex_re.findall(ct) # there is a .aux file to process if fo: 
warn('calling bibtex') task.env.env = {'BIBINPUTS': sr2, 'BSTINPUTS': sr2} task.env.SRCFILE = docuname ret = bibtex_fun(task) if ret: error('error when calling bibtex %s' % docuname) return ret # look on the filesystem if there is a .idx file to process try: idx_path = idx_node.abspath(env) os.stat(idx_path) except OSError: error('error file.idx scan') else: warn('calling makeindex') task.env.SRCFILE = idx_node.name task.env.env = {} ret = makeindex_fun(task) if ret: error('error when calling makeindex %s' % idx_path) return ret hash = '' i = 0 while i < 10: # prevent against infinite loops - one never knows i += 1 # watch the contents of file.aux prev_hash = hash try: hash = Utils.h_file(aux_node.abspath(env)) except KeyError: error('could not read aux.h -> %s' % aux_node.abspath(env)) pass # debug #print "hash is, ", hash, " ", old_hash # stop if file.aux does not change anymore if hash and hash == prev_hash: break # run the command warn('calling %s' % command) task.env.env = {'TEXINPUTS': sr2 + os.pathsep} task.env.SRCFILE = srcfile ret = fun(task) if ret: error('error when calling %s %s' % (command, latex_compile_cmd)) return ret return None # ok latex_vardeps = ['LATEX', 'LATEXFLAGS'] def latex_build(task): return tex_build(task, 'LATEX') pdflatex_vardeps = ['PDFLATEX', 'PDFLATEXFLAGS'] def pdflatex_build(task): return tex_build(task, 'PDFLATEX') class tex_taskgen(TaskGen.task_gen): def __init__(self, *k, **kw): TaskGen.task_gen.__init__(self, *k, **kw) @feature('tex') @before('apply_core') def apply_tex(self): if not getattr(self, 'type', None) in ['latex', 'pdflatex']: self.type = 'pdflatex' tree = self.bld outs = Utils.to_list(getattr(self, 'outs', [])) # prompt for incomplete files (else the batchmode is used) self.env['PROMPT_LATEX'] = getattr(self, 'prompt', 1) deps_lst = [] if getattr(self, 'deps', None): deps = self.to_list(self.deps) for filename in deps: n = self.path.find_resource(filename) if not n in deps_lst: deps_lst.append(n) self.source = 
self.to_list(self.source) for filename in self.source: base, ext = os.path.splitext(filename) node = self.path.find_resource(filename) if not node: raise Utils.WafError('cannot find %s' % filename) if self.type == 'latex': task = self.create_task('latex', node, node.change_ext('.dvi')) elif self.type == 'pdflatex': task = self.create_task('pdflatex', node, node.change_ext('.pdf')) task.env = self.env task.curdirnode = self.path # add the manual dependencies if deps_lst: variant = node.variant(self.env) try: lst = tree.node_deps[task.unique_id()] for n in deps_lst: if not n in lst: lst.append(n) except KeyError: tree.node_deps[task.unique_id()] = deps_lst if self.type == 'latex': if 'ps' in outs: tsk = self.create_task('dvips', task.outputs, node.change_ext('.ps')) tsk.env.env = {'TEXINPUTS' : node.parent.abspath() + os.pathsep + self.path.abspath() + os.pathsep + self.path.abspath(self.env)} if 'pdf' in outs: tsk = self.create_task('dvipdf', task.outputs, node.change_ext('.pdf')) tsk.env.env = {'TEXINPUTS' : node.parent.abspath() + os.pathsep + self.path.abspath() + os.pathsep + self.path.abspath(self.env)} elif self.type == 'pdflatex': if 'ps' in outs: self.create_task('pdf2ps', task.outputs, node.change_ext('.ps')) self.source = [] def detect(conf): v = conf.env for p in 'tex latex pdflatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps'.split(): conf.find_program(p, var=p.upper()) v[p.upper()+'FLAGS'] = '' v['DVIPSFLAGS'] = '-Ppdf' b = Task.simple_task_type b('tex', '${TEX} ${TEXFLAGS} ${SRC}', color='BLUE', shell=False) # not used anywhere b('bibtex', '${BIBTEX} ${BIBTEXFLAGS} ${SRC}', color='BLUE', shell=False) # not used anywhere b('dvips', '${DVIPS} ${DVIPSFLAGS} ${SRC} -o ${TGT}', color='BLUE', after="latex pdflatex tex bibtex", shell=False) b('dvipdf', '${DVIPDF} ${DVIPDFFLAGS} ${SRC} ${TGT}', color='BLUE', after="latex pdflatex tex bibtex", shell=False) b('pdf2ps', '${PDF2PS} ${PDF2PSFLAGS} ${SRC} ${TGT}', color='BLUE', after="dvipdf pdflatex", shell=False) 
b = Task.task_type_from_func cls = b('latex', latex_build, vars=latex_vardeps) cls.scan = scan cls = b('pdflatex', pdflatex_build, vars=pdflatex_vardeps) cls.scan = scan ntdb-1.0/buildtools/wafadmin/Tools/unittestw.py000066400000000000000000000230771224151530700220000ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2006 """ Unit tests run in the shutdown() method, and for c/c++ programs One should NOT have to give parameters to programs to execute In the shutdown method, add the following code: >>> def shutdown(): ... ut = UnitTest.unit_test() ... ut.run() ... ut.print_results() Each object to use as a unit test must be a program and must have X{obj.unit_test=1} """ import os, sys import Build, TaskGen, Utils, Options, Logs, Task from TaskGen import before, after, feature from Constants import * class unit_test(object): "Unit test representation" def __init__(self): self.returncode_ok = 0 # Unit test returncode considered OK. All returncodes differing from this one # will cause the unit test to be marked as "FAILED". # The following variables are filled with data by run(). # print_results() uses these for printing the unit test summary, # but if there is need for direct access to the results, # they can be retrieved here, after calling run(). self.num_tests_ok = 0 # Number of successful unit tests self.num_tests_failed = 0 # Number of failed unit tests self.num_tests_err = 0 # Tests that have not even run self.total_num_tests = 0 # Total amount of unit tests self.max_label_length = 0 # Maximum label length (pretty-print the output) self.unit_tests = Utils.ordered_dict() # Unit test dictionary. Key: the label (unit test filename relative # to the build dir), value: unit test filename with absolute path self.unit_test_results = {} # Dictionary containing the unit test results. # Key: the label, value: result (true = success false = failure) self.unit_test_erroneous = {} # Dictionary indicating erroneous unit tests. 
# Key: the label, value: true = unit test has an error false = unit test is ok self.change_to_testfile_dir = False #True if the test file needs to be executed from the same dir self.want_to_see_test_output = False #True to see the stdout from the testfile (for example check suites) self.want_to_see_test_error = False #True to see the stderr from the testfile (for example check suites) self.run_if_waf_does = 'check' #build was the old default def run(self): "Run the unit tests and gather results (note: no output here)" self.num_tests_ok = 0 self.num_tests_failed = 0 self.num_tests_err = 0 self.total_num_tests = 0 self.max_label_length = 0 self.unit_tests = Utils.ordered_dict() self.unit_test_results = {} self.unit_test_erroneous = {} ld_library_path = [] # If waf is not building, don't run anything if not Options.commands[self.run_if_waf_does]: return # Get the paths for the shared libraries, and obtain the unit tests to execute for obj in Build.bld.all_task_gen: try: link_task = obj.link_task except AttributeError: pass else: lib_path = link_task.outputs[0].parent.abspath(obj.env) if lib_path not in ld_library_path: ld_library_path.append(lib_path) unit_test = getattr(obj, 'unit_test', '') if unit_test and 'cprogram' in obj.features: try: output = obj.path filename = os.path.join(output.abspath(obj.env), obj.target) srcdir = output.abspath() label = os.path.join(output.bldpath(obj.env), obj.target) self.max_label_length = max(self.max_label_length, len(label)) self.unit_tests[label] = (filename, srcdir) except KeyError: pass self.total_num_tests = len(self.unit_tests) # Now run the unit tests Utils.pprint('GREEN', 'Running the unit tests') count = 0 result = 1 for label in self.unit_tests.allkeys: file_and_src = self.unit_tests[label] filename = file_and_src[0] srcdir = file_and_src[1] count += 1 line = Build.bld.progress_line(count, self.total_num_tests, Logs.colors.GREEN, Logs.colors.NORMAL) if Options.options.progress_bar and line: sys.stderr.write(line) 
sys.stderr.flush() try: kwargs = {} kwargs['env'] = os.environ.copy() if self.change_to_testfile_dir: kwargs['cwd'] = srcdir if not self.want_to_see_test_output: kwargs['stdout'] = Utils.pproc.PIPE # PIPE for ignoring output if not self.want_to_see_test_error: kwargs['stderr'] = Utils.pproc.PIPE # PIPE for ignoring output if ld_library_path: v = kwargs['env'] def add_path(dct, path, var): dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')]) if sys.platform == 'win32': add_path(v, ld_library_path, 'PATH') elif sys.platform == 'darwin': add_path(v, ld_library_path, 'DYLD_LIBRARY_PATH') add_path(v, ld_library_path, 'LD_LIBRARY_PATH') else: add_path(v, ld_library_path, 'LD_LIBRARY_PATH') pp = Utils.pproc.Popen(filename, **kwargs) (out, err) = pp.communicate() # uh, and the output is ignored?? - fortunately this is going to disappear result = int(pp.returncode == self.returncode_ok) if result: self.num_tests_ok += 1 else: self.num_tests_failed += 1 self.unit_test_results[label] = result self.unit_test_erroneous[label] = 0 except OSError: self.unit_test_erroneous[label] = 1 self.num_tests_err += 1 except KeyboardInterrupt: pass if Options.options.progress_bar: sys.stdout.write(Logs.colors.cursor_on) def print_results(self): "Pretty-prints a summary of all unit tests, along with some statistics" # If waf is not building, don't output anything if not Options.commands[self.run_if_waf_does]: return p = Utils.pprint # Early quit if no tests were performed if self.total_num_tests == 0: p('YELLOW', 'No unit tests present') return for label in self.unit_tests.allkeys: filename = self.unit_tests[label] err = 0 result = 0 try: err = self.unit_test_erroneous[label] except KeyError: pass try: result = self.unit_test_results[label] except KeyError: pass n = self.max_label_length - len(label) if err: n += 4 elif result: n += 7 else: n += 3 line = '%s %s' % (label, '.' 
* n) if err: p('RED', '%sERROR' % line) elif result: p('GREEN', '%sOK' % line) else: p('YELLOW', '%sFAILED' % line) percentage_ok = float(self.num_tests_ok) / float(self.total_num_tests) * 100.0 percentage_failed = float(self.num_tests_failed) / float(self.total_num_tests) * 100.0 percentage_erroneous = float(self.num_tests_err) / float(self.total_num_tests) * 100.0 p('NORMAL', ''' Successful tests: %i (%.1f%%) Failed tests: %i (%.1f%%) Erroneous tests: %i (%.1f%%) Total number of tests: %i ''' % (self.num_tests_ok, percentage_ok, self.num_tests_failed, percentage_failed, self.num_tests_err, percentage_erroneous, self.total_num_tests)) p('GREEN', 'Unit tests finished') ############################################################################################ """ New unit test system The targets with feature 'test' are executed after they are built bld(features='cprogram cc test', ...) To display the results: import UnitTest bld.add_post_fun(UnitTest.summary) """ import threading testlock = threading.Lock() def set_options(opt): opt.add_option('--alltests', action='store_true', default=True, help='Exec all unit tests', dest='all_tests') @feature('test') @after('apply_link', 'vars_target_cprogram') def make_test(self): if not 'cprogram' in self.features: Logs.error('test cannot be executed %s' % self) return self.default_install_path = None self.create_task('utest', self.link_task.outputs) def exec_test(self): status = 0 variant = self.env.variant() filename = self.inputs[0].abspath(self.env) self.ut_exec = getattr(self, 'ut_exec', [filename]) if getattr(self.generator, 'ut_fun', None): self.generator.ut_fun(self) try: fu = getattr(self.generator.bld, 'all_test_paths') except AttributeError: fu = os.environ.copy() self.generator.bld.all_test_paths = fu lst = [] for obj in self.generator.bld.all_task_gen: link_task = getattr(obj, 'link_task', None) if link_task and link_task.env.variant() == variant: lst.append(link_task.outputs[0].parent.abspath(obj.env)) def 
add_path(dct, path, var): dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')]) if sys.platform == 'win32': add_path(fu, lst, 'PATH') elif sys.platform == 'darwin': add_path(fu, lst, 'DYLD_LIBRARY_PATH') add_path(fu, lst, 'LD_LIBRARY_PATH') else: add_path(fu, lst, 'LD_LIBRARY_PATH') cwd = getattr(self.generator, 'ut_cwd', '') or self.inputs[0].parent.abspath(self.env) proc = Utils.pproc.Popen(self.ut_exec, cwd=cwd, env=fu, stderr=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE) (stdout, stderr) = proc.communicate() tup = (filename, proc.returncode, stdout, stderr) self.generator.utest_result = tup testlock.acquire() try: bld = self.generator.bld Logs.debug("ut: %r", tup) try: bld.utest_results.append(tup) except AttributeError: bld.utest_results = [tup] finally: testlock.release() cls = Task.task_type_from_func('utest', func=exec_test, color='PINK', ext_in='.bin') old = cls.runnable_status def test_status(self): ret = old(self) if ret == SKIP_ME and getattr(Options.options, 'all_tests', False): return RUN_ME return ret cls.runnable_status = test_status cls.quiet = 1 def summary(bld): lst = getattr(bld, 'utest_results', []) if lst: Utils.pprint('CYAN', 'execution summary') total = len(lst) tfail = len([x for x in lst if x[1]]) Utils.pprint('CYAN', ' tests that pass %d/%d' % (total-tfail, total)) for (f, code, out, err) in lst: if not code: Utils.pprint('CYAN', ' %s' % f) Utils.pprint('CYAN', ' tests that fail %d/%d' % (tfail, total)) for (f, code, out, err) in lst: if code: Utils.pprint('CYAN', ' %s' % f) ntdb-1.0/buildtools/wafadmin/Tools/vala.py000066400000000000000000000240711224151530700206500ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Ali Sabil, 2007 import os.path, shutil import Task, Runner, Utils, Logs, Build, Node, Options from TaskGen import extension, after, before EXT_VALA = ['.vala', '.gs'] class valac_task(Task.Task): vars = ("VALAC", "VALAC_VERSION", "VALAFLAGS") before = ("cc", "cxx") def run(self): env = 
self.env inputs = [a.srcpath(env) for a in self.inputs] valac = env['VALAC'] vala_flags = env.get_flat('VALAFLAGS') top_src = self.generator.bld.srcnode.abspath() top_bld = self.generator.bld.srcnode.abspath(env) if env['VALAC_VERSION'] > (0, 1, 6): cmd = [valac, '-C', '--quiet', vala_flags] else: cmd = [valac, '-C', vala_flags] if self.threading: cmd.append('--thread') if self.profile: cmd.append('--profile=%s' % self.profile) if self.target_glib: cmd.append('--target-glib=%s' % self.target_glib) features = self.generator.features if 'cshlib' in features or 'cstaticlib' in features: output_dir = self.outputs[0].bld_dir(env) cmd.append('--library ' + self.target) if env['VALAC_VERSION'] >= (0, 7, 0): for x in self.outputs: if x.name.endswith('.h'): cmd.append('--header ' + x.bldpath(self.env)) cmd.append('--basedir ' + top_src) cmd.append('-d ' + top_bld) if env['VALAC_VERSION'] > (0, 7, 2) and hasattr(self, 'gir'): cmd.append('--gir=%s.gir' % self.gir) else: output_dir = self.outputs[0].bld_dir(env) cmd.append('-d %s' % output_dir) for vapi_dir in self.vapi_dirs: cmd.append('--vapidir=%s' % vapi_dir) for package in self.packages: cmd.append('--pkg %s' % package) for package in self.packages_private: cmd.append('--pkg %s' % package) cmd.append(" ".join(inputs)) result = self.generator.bld.exec_command(" ".join(cmd)) if not 'cprogram' in features: # generate the .deps file if self.packages: filename = os.path.join(self.generator.path.abspath(env), "%s.deps" % self.target) deps = open(filename, 'w') for package in self.packages: deps.write(package + '\n') deps.close() # handle vala 0.1.6 who doesn't honor --directory for the generated .vapi self._fix_output("../%s.vapi" % self.target) # handle vala >= 0.1.7 who has a weid definition for --directory self._fix_output("%s.vapi" % self.target) # handle vala >= 0.2.0 who doesn't honor --directory for the generated .gidl self._fix_output("%s.gidl" % self.target) # handle vala >= 0.3.6 who doesn't honor --directory for the 
generated .gir self._fix_output("%s.gir" % self.target) if hasattr(self, 'gir'): self._fix_output("%s.gir" % self.gir) first = None for node in self.outputs: if not first: first = node else: if first.parent.id != node.parent.id: # issue #483 if env['VALAC_VERSION'] < (0, 7, 0): shutil.move(first.parent.abspath(self.env) + os.sep + node.name, node.abspath(self.env)) return result def install(self): bld = self.generator.bld features = self.generator.features if self.attr("install_path") and ("cshlib" in features or "cstaticlib" in features): headers_list = [o for o in self.outputs if o.suffix() == ".h"] vapi_list = [o for o in self.outputs if (o.suffix() in (".vapi", ".deps"))] gir_list = [o for o in self.outputs if o.suffix() == ".gir"] for header in headers_list: top_src = self.generator.bld.srcnode package = self.env['PACKAGE'] try: api_version = Utils.g_module.API_VERSION except AttributeError: version = Utils.g_module.VERSION.split(".") if version[0] == "0": api_version = "0." + version[1] else: api_version = version[0] + ".0" install_path = '${INCLUDEDIR}/%s-%s/%s' % (package, api_version, header.relpath_gen(top_src)) bld.install_as(install_path, header, self.env) bld.install_files('${DATAROOTDIR}/vala/vapi', vapi_list, self.env) bld.install_files('${DATAROOTDIR}/gir-1.0', gir_list, self.env) def _fix_output(self, output): top_bld = self.generator.bld.srcnode.abspath(self.env) try: src = os.path.join(top_bld, output) dst = self.generator.path.abspath (self.env) shutil.move(src, dst) except: pass @extension(EXT_VALA) def vala_file(self, node): valatask = getattr(self, "valatask", None) # there is only one vala task and it compiles all vala files .. 
:-/ if not valatask: valatask = self.create_task('valac') self.valatask = valatask self.includes = Utils.to_list(getattr(self, 'includes', [])) self.uselib = self.to_list(self.uselib) valatask.packages = [] valatask.packages_private = Utils.to_list(getattr(self, 'packages_private', [])) valatask.vapi_dirs = [] valatask.target = self.target valatask.threading = False valatask.install_path = self.install_path valatask.profile = getattr (self, 'profile', 'gobject') valatask.target_glib = None #Deprecated packages = Utils.to_list(getattr(self, 'packages', [])) vapi_dirs = Utils.to_list(getattr(self, 'vapi_dirs', [])) includes = [] if hasattr(self, 'uselib_local'): local_packages = Utils.to_list(self.uselib_local) seen = [] while len(local_packages) > 0: package = local_packages.pop() if package in seen: continue seen.append(package) # check if the package exists package_obj = self.name_to_obj(package) if not package_obj: raise Utils.WafError("object '%s' was not found in uselib_local (required by '%s')" % (package, self.name)) package_name = package_obj.target package_node = package_obj.path package_dir = package_node.relpath_gen(self.path) for task in package_obj.tasks: for output in task.outputs: if output.name == package_name + ".vapi": valatask.set_run_after(task) if package_name not in packages: packages.append(package_name) if package_dir not in vapi_dirs: vapi_dirs.append(package_dir) if package_dir not in includes: includes.append(package_dir) if hasattr(package_obj, 'uselib_local'): lst = self.to_list(package_obj.uselib_local) lst.reverse() local_packages = [pkg for pkg in lst if pkg not in seen] + local_packages valatask.packages = packages for vapi_dir in vapi_dirs: try: valatask.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath()) valatask.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath(self.env)) except AttributeError: Logs.warn("Unable to locate Vala API directory: '%s'" % vapi_dir) self.includes.append(node.bld.srcnode.abspath()) 
self.includes.append(node.bld.srcnode.abspath(self.env)) for include in includes: try: self.includes.append(self.path.find_dir(include).abspath()) self.includes.append(self.path.find_dir(include).abspath(self.env)) except AttributeError: Logs.warn("Unable to locate include directory: '%s'" % include) if valatask.profile == 'gobject': if hasattr(self, 'target_glib'): Logs.warn ('target_glib on vala tasks is deprecated --vala-target-glib=MAJOR.MINOR from the vala tool options') if getattr(Options.options, 'vala_target_glib', None): valatask.target_glib = Options.options.vala_target_glib if not 'GOBJECT' in self.uselib: self.uselib.append('GOBJECT') if hasattr(self, 'threading'): if valatask.profile == 'gobject': valatask.threading = self.threading if not 'GTHREAD' in self.uselib: self.uselib.append('GTHREAD') else: #Vala doesn't have threading support for dova nor posix Logs.warn("Profile %s does not have threading support" % valatask.profile) if hasattr(self, 'gir'): valatask.gir = self.gir env = valatask.env output_nodes = [] c_node = node.change_ext('.c') output_nodes.append(c_node) self.allnodes.append(c_node) if env['VALAC_VERSION'] < (0, 7, 0): output_nodes.append(node.change_ext('.h')) else: if not 'cprogram' in self.features: output_nodes.append(self.path.find_or_declare('%s.h' % self.target)) if not 'cprogram' in self.features: output_nodes.append(self.path.find_or_declare('%s.vapi' % self.target)) if env['VALAC_VERSION'] > (0, 7, 2): if hasattr(self, 'gir'): output_nodes.append(self.path.find_or_declare('%s.gir' % self.gir)) elif env['VALAC_VERSION'] > (0, 3, 5): output_nodes.append(self.path.find_or_declare('%s.gir' % self.target)) elif env['VALAC_VERSION'] > (0, 1, 7): output_nodes.append(self.path.find_or_declare('%s.gidl' % self.target)) if valatask.packages: output_nodes.append(self.path.find_or_declare('%s.deps' % self.target)) valatask.inputs.append(node) valatask.outputs.extend(output_nodes) def detect(conf): min_version = (0, 1, 6) min_version_str 
= "%d.%d.%d" % min_version valac = conf.find_program('valac', var='VALAC', mandatory=True) if not conf.env["HAVE_GOBJECT"]: pkg_args = {'package': 'gobject-2.0', 'uselib_store': 'GOBJECT', 'args': '--cflags --libs'} if getattr(Options.options, 'vala_target_glib', None): pkg_args['atleast_version'] = Options.options.vala_target_glib conf.check_cfg(**pkg_args) if not conf.env["HAVE_GTHREAD"]: pkg_args = {'package': 'gthread-2.0', 'uselib_store': 'GTHREAD', 'args': '--cflags --libs'} if getattr(Options.options, 'vala_target_glib', None): pkg_args['atleast_version'] = Options.options.vala_target_glib conf.check_cfg(**pkg_args) try: output = Utils.cmd_output(valac + " --version", silent=True) version = output.split(' ', 1)[-1].strip().split(".")[0:3] version = [int(x) for x in version] valac_version = tuple(version) except Exception: valac_version = (0, 0, 0) conf.check_message('program version', 'valac >= ' + min_version_str, valac_version >= min_version, "%d.%d.%d" % valac_version) conf.check_tool('gnu_dirs') if valac_version < min_version: conf.fatal("valac version too old to be used with this tool") return conf.env['VALAC_VERSION'] = valac_version conf.env['VALAFLAGS'] = '' def set_options (opt): valaopts = opt.add_option_group('Vala Compiler Options') valaopts.add_option ('--vala-target-glib', default=None, dest='vala_target_glib', metavar='MAJOR.MINOR', help='Target version of glib for Vala GObject code generation') ntdb-1.0/buildtools/wafadmin/Tools/winres.py000066400000000000000000000024061224151530700212320ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Brant Young, 2007 "This hook is called when the class cpp/cc task generator encounters a '.rc' file: X{.rc -> [.res|.rc.o]}" import os, sys, re import TaskGen, Task from Utils import quote_whitespace from TaskGen import extension EXT_WINRC = ['.rc'] winrc_str = '${WINRC} ${_CPPDEFFLAGS} ${_CCDEFFLAGS} ${WINRCFLAGS} ${_CPPINCFLAGS} ${_CCINCFLAGS} ${WINRC_TGT_F} ${TGT} ${WINRC_SRC_F} ${SRC}' 
@extension(EXT_WINRC) def rc_file(self, node): obj_ext = '.rc.o' if self.env['WINRC_TGT_F'] == '/fo': obj_ext = '.res' rctask = self.create_task('winrc', node, node.change_ext(obj_ext)) self.compiled_tasks.append(rctask) # create our action, for use with rc file Task.simple_task_type('winrc', winrc_str, color='BLUE', before='cc cxx', shell=False) def detect(conf): v = conf.env winrc = v['WINRC'] v['WINRC_TGT_F'] = '-o' v['WINRC_SRC_F'] = '-i' # find rc.exe if not winrc: if v['CC_NAME'] in ['gcc', 'cc', 'g++', 'c++']: winrc = conf.find_program('windres', var='WINRC', path_list = v['PATH']) elif v['CC_NAME'] == 'msvc': winrc = conf.find_program('RC', var='WINRC', path_list = v['PATH']) v['WINRC_TGT_F'] = '/fo' v['WINRC_SRC_F'] = '' if not winrc: conf.fatal('winrc was not found!') v['WINRCFLAGS'] = '' ntdb-1.0/buildtools/wafadmin/Tools/xlc.py000066400000000000000000000037701224151530700205160ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2008 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 # Michael Kuhn, 2009 import os, sys import Configure, Options, Utils import ccroot, ar from Configure import conftest @conftest def find_xlc(conf): cc = conf.find_program(['xlc_r', 'xlc'], var='CC', mandatory=True) cc = conf.cmd_to_list(cc) conf.env.CC_NAME = 'xlc' conf.env.CC = cc @conftest def find_cpp(conf): v = conf.env cpp = None if v['CPP']: cpp = v['CPP'] elif 'CPP' in conf.environ: cpp = conf.environ['CPP'] #if not cpp: cpp = v['CC'] v['CPP'] = cpp @conftest def xlc_common_flags(conf): v = conf.env # CPPFLAGS CCDEFINES _CCINCFLAGS _CCDEFFLAGS v['CCFLAGS_DEBUG'] = ['-g'] v['CCFLAGS_RELEASE'] = ['-O2'] v['CC_SRC_F'] = '' v['CC_TGT_F'] = ['-c', '-o', ''] # shell hack for -MD v['CPPPATH_ST'] = '-I%s' # template for adding include paths # linker if not v['LINK_CC']: v['LINK_CC'] = v['CC'] v['CCLNK_SRC_F'] = '' v['CCLNK_TGT_F'] = ['-o', ''] # shell hack for -MD v['LIB_ST'] = '-l%s' # template for adding libs v['LIBPATH_ST'] = '-L%s' 
# template for adding libpaths v['STATICLIB_ST'] = '-l%s' v['STATICLIBPATH_ST'] = '-L%s' v['RPATH_ST'] = '-Wl,-rpath,%s' v['CCDEFINES_ST'] = '-D%s' v['SONAME_ST'] = '' v['SHLIB_MARKER'] = '' v['STATICLIB_MARKER'] = '' v['FULLSTATIC_MARKER'] = '-static' # program v['program_LINKFLAGS'] = ['-Wl,-brtl'] v['program_PATTERN'] = '%s' # shared library v['shlib_CCFLAGS'] = ['-fPIC', '-DPIC'] # avoid using -DPIC, -fPIC aleady defines the __PIC__ macro v['shlib_LINKFLAGS'] = ['-G', '-Wl,-brtl,-bexpfull'] v['shlib_PATTERN'] = 'lib%s.so' # static lib v['staticlib_LINKFLAGS'] = '' v['staticlib_PATTERN'] = 'lib%s.a' def detect(conf): conf.find_xlc() conf.find_cpp() conf.find_ar() conf.xlc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() ntdb-1.0/buildtools/wafadmin/Tools/xlcxx.py000066400000000000000000000040231224151530700210660ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 # Michael Kuhn, 2009 import os, sys import Configure, Options, Utils import ccroot, ar from Configure import conftest @conftest def find_xlcxx(conf): cxx = conf.find_program(['xlc++_r', 'xlc++'], var='CXX', mandatory=True) cxx = conf.cmd_to_list(cxx) conf.env.CXX_NAME = 'xlc++' conf.env.CXX = cxx @conftest def find_cpp(conf): v = conf.env cpp = None if v['CPP']: cpp = v['CPP'] elif 'CPP' in conf.environ: cpp = conf.environ['CPP'] #if not cpp: cpp = v['CXX'] v['CPP'] = cpp @conftest def xlcxx_common_flags(conf): v = conf.env # CPPFLAGS CXXDEFINES _CXXINCFLAGS _CXXDEFFLAGS v['CXXFLAGS_DEBUG'] = ['-g'] v['CXXFLAGS_RELEASE'] = ['-O2'] v['CXX_SRC_F'] = '' v['CXX_TGT_F'] = ['-c', '-o', ''] # shell hack for -MD v['CPPPATH_ST'] = '-I%s' # template for adding include paths # linker if not v['LINK_CXX']: v['LINK_CXX'] = v['CXX'] v['CXXLNK_SRC_F'] = '' v['CXXLNK_TGT_F'] = ['-o', ''] # shell hack for -MD v['LIB_ST'] = '-l%s' # template for adding libs v['LIBPATH_ST'] = '-L%s' # template for 
adding libpaths v['STATICLIB_ST'] = '-l%s' v['STATICLIBPATH_ST'] = '-L%s' v['RPATH_ST'] = '-Wl,-rpath,%s' v['CXXDEFINES_ST'] = '-D%s' v['SONAME_ST'] = '' v['SHLIB_MARKER'] = '' v['STATICLIB_MARKER'] = '' v['FULLSTATIC_MARKER'] = '-static' # program v['program_LINKFLAGS'] = ['-Wl,-brtl'] v['program_PATTERN'] = '%s' # shared library v['shlib_CXXFLAGS'] = ['-fPIC', '-DPIC'] # avoid using -DPIC, -fPIC aleady defines the __PIC__ macro v['shlib_LINKFLAGS'] = ['-G', '-Wl,-brtl,-bexpfull'] v['shlib_PATTERN'] = 'lib%s.so' # static lib v['staticlib_LINKFLAGS'] = '' v['staticlib_PATTERN'] = 'lib%s.a' def detect(conf): conf.find_xlcxx() conf.find_cpp() conf.find_ar() conf.xlcxx_common_flags() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() ntdb-1.0/buildtools/wafadmin/Utils.py000066400000000000000000000442651224151530700177340ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005 (ita) """ Utilities, the stable ones are the following: * h_file: compute a unique value for a file (hash), it uses the module fnv if it is installed (see waf/utils/fnv & http://code.google.com/p/waf/wiki/FAQ) else, md5 (see the python docs) For large projects (projects with more than 15000 files) or slow hard disks and filesystems (HFS) it is possible to use a hashing based on the path and the size (may give broken cache results) The method h_file MUST raise an OSError if the file is a folder import stat def h_file(filename): st = os.lstat(filename) if stat.S_ISDIR(st[stat.ST_MODE]): raise IOError('not a file') m = Utils.md5() m.update(str(st.st_mtime)) m.update(str(st.st_size)) m.update(filename) return m.digest() To replace the function in your project, use something like this: import Utils Utils.h_file = h_file * h_list * h_fun * get_term_cols * ordered_dict """ import os, sys, imp, string, errno, traceback, inspect, re, shutil, datetime, gc # In python 3.0 we can get rid of all this try: from UserDict import UserDict except ImportError: from 
collections import UserDict if sys.hexversion >= 0x2060000 or os.name == 'java': import subprocess as pproc else: import pproc import Logs from Constants import * try: from collections import deque except ImportError: class deque(list): def popleft(self): return self.pop(0) is_win32 = sys.platform == 'win32' try: # defaultdict in python 2.5 from collections import defaultdict as DefaultDict except ImportError: class DefaultDict(dict): def __init__(self, default_factory): super(DefaultDict, self).__init__() self.default_factory = default_factory def __getitem__(self, key): try: return super(DefaultDict, self).__getitem__(key) except KeyError: value = self.default_factory() self[key] = value return value class WafError(Exception): def __init__(self, *args): self.args = args try: self.stack = traceback.extract_stack() except: pass Exception.__init__(self, *args) def __str__(self): return str(len(self.args) == 1 and self.args[0] or self.args) class WscriptError(WafError): def __init__(self, message, wscript_file=None): if wscript_file: self.wscript_file = wscript_file self.wscript_line = None else: try: (self.wscript_file, self.wscript_line) = self.locate_error() except: (self.wscript_file, self.wscript_line) = (None, None) msg_file_line = '' if self.wscript_file: msg_file_line = "%s:" % self.wscript_file if self.wscript_line: msg_file_line += "%s:" % self.wscript_line err_message = "%s error: %s" % (msg_file_line, message) WafError.__init__(self, err_message) def locate_error(self): stack = traceback.extract_stack() stack.reverse() for frame in stack: file_name = os.path.basename(frame[0]) is_wscript = (file_name == WSCRIPT_FILE or file_name == WSCRIPT_BUILD_FILE) if is_wscript: return (frame[0], frame[1]) return (None, None) indicator = is_win32 and '\x1b[A\x1b[K%s%s%s\r' or '\x1b[K%s%s%s\r' try: from fnv import new as md5 import Constants Constants.SIG_NIL = 'signofnv' def h_file(filename): m = md5() try: m.hfile(filename) x = m.digest() if x is None: raise 
OSError("not a file") return x except SystemError: raise OSError("not a file" + filename) except ImportError: try: try: from hashlib import md5 except ImportError: from md5 import md5 def h_file(filename): f = open(filename, 'rb') m = md5() while (filename): filename = f.read(100000) m.update(filename) f.close() return m.digest() except ImportError: # portability fixes may be added elsewhere (although, md5 should be everywhere by now) md5 = None class ordered_dict(UserDict): def __init__(self, dict = None): self.allkeys = [] UserDict.__init__(self, dict) def __delitem__(self, key): self.allkeys.remove(key) UserDict.__delitem__(self, key) def __setitem__(self, key, item): if key not in self.allkeys: self.allkeys.append(key) UserDict.__setitem__(self, key, item) def exec_command(s, **kw): if 'log' in kw: kw['stdout'] = kw['stderr'] = kw['log'] del(kw['log']) kw['shell'] = isinstance(s, str) try: proc = pproc.Popen(s, **kw) return proc.wait() except OSError: return -1 if is_win32: def exec_command(s, **kw): if 'log' in kw: kw['stdout'] = kw['stderr'] = kw['log'] del(kw['log']) kw['shell'] = isinstance(s, str) if len(s) > 2000: startupinfo = pproc.STARTUPINFO() startupinfo.dwFlags |= pproc.STARTF_USESHOWWINDOW kw['startupinfo'] = startupinfo try: if 'stdout' not in kw: kw['stdout'] = pproc.PIPE kw['stderr'] = pproc.PIPE kw['universal_newlines'] = True proc = pproc.Popen(s,**kw) (stdout, stderr) = proc.communicate() Logs.info(stdout) if stderr: Logs.error(stderr) return proc.returncode else: proc = pproc.Popen(s,**kw) return proc.wait() except OSError: return -1 listdir = os.listdir if is_win32: def listdir_win32(s): if re.match('^[A-Za-z]:$', s): # os.path.isdir fails if s contains only the drive name... 
(x:) s += os.sep if not os.path.isdir(s): e = OSError() e.errno = errno.ENOENT raise e return os.listdir(s) listdir = listdir_win32 def waf_version(mini = 0x010000, maxi = 0x100000): "Halts if the waf version is wrong" ver = HEXVERSION try: min_val = mini + 0 except TypeError: min_val = int(mini.replace('.', '0'), 16) if min_val > ver: Logs.error("waf version should be at least %s (%s found)" % (mini, ver)) sys.exit(1) try: max_val = maxi + 0 except TypeError: max_val = int(maxi.replace('.', '0'), 16) if max_val < ver: Logs.error("waf version should be at most %s (%s found)" % (maxi, ver)) sys.exit(1) def python_24_guard(): if sys.hexversion < 0x20400f0 or sys.hexversion >= 0x3000000: raise ImportError("Waf requires Python >= 2.3 but the raw source requires Python 2.4, 2.5 or 2.6") def ex_stack(): exc_type, exc_value, tb = sys.exc_info() if Logs.verbose > 1: exc_lines = traceback.format_exception(exc_type, exc_value, tb) return ''.join(exc_lines) return str(exc_value) def to_list(sth): if isinstance(sth, str): return sth.split() else: return sth g_loaded_modules = {} "index modules by absolute path" g_module=None "the main module is special" def load_module(file_path, name=WSCRIPT_FILE): "this function requires an absolute path" try: return g_loaded_modules[file_path] except KeyError: pass module = imp.new_module(name) try: code = readf(file_path, m='rU') except (IOError, OSError): raise WscriptError('Could not read the file %r' % file_path) module.waf_hash_val = code dt = os.path.dirname(file_path) sys.path.insert(0, dt) try: exec(compile(code, file_path, 'exec'), module.__dict__) except Exception: exc_type, exc_value, tb = sys.exc_info() raise WscriptError("".join(traceback.format_exception(exc_type, exc_value, tb)), file_path) sys.path.remove(dt) g_loaded_modules[file_path] = module return module def set_main_module(file_path): "Load custom options, if defined" global g_module g_module = load_module(file_path, 'wscript_main') g_module.root_path = file_path try: 
g_module.APPNAME except: g_module.APPNAME = 'noname' try: g_module.VERSION except: g_module.VERSION = '1.0' # note: to register the module globally, use the following: # sys.modules['wscript_main'] = g_module def to_hashtable(s): "used for importing env files" tbl = {} lst = s.split('\n') for line in lst: if not line: continue mems = line.split('=') tbl[mems[0]] = mems[1] return tbl def get_term_cols(): "console width" return 80 try: import struct, fcntl, termios except ImportError: pass else: if Logs.got_tty: def myfun(): dummy_lines, cols = struct.unpack("HHHH", \ fcntl.ioctl(sys.stderr.fileno(),termios.TIOCGWINSZ , \ struct.pack("HHHH", 0, 0, 0, 0)))[:2] return cols # we actually try the function once to see if it is suitable try: myfun() except: pass else: get_term_cols = myfun rot_idx = 0 rot_chr = ['\\', '|', '/', '-'] "the rotation character in the progress bar" def split_path(path): return path.split('/') def split_path_cygwin(path): if path.startswith('//'): ret = path.split('/')[2:] ret[0] = '/' + ret[0] return ret return path.split('/') re_sp = re.compile('[/\\\\]') def split_path_win32(path): if path.startswith('\\\\'): ret = re.split(re_sp, path)[2:] ret[0] = '\\' + ret[0] return ret return re.split(re_sp, path) if sys.platform == 'cygwin': split_path = split_path_cygwin elif is_win32: split_path = split_path_win32 def copy_attrs(orig, dest, names, only_if_set=False): for a in to_list(names): u = getattr(orig, a, ()) if u or not only_if_set: setattr(dest, a, u) def def_attrs(cls, **kw): ''' set attributes for class. @param cls [any class]: the class to update the given attributes in. @param kw [dictionary]: dictionary of attributes names and values. if the given class hasn't one (or more) of these attributes, add the attribute with its value to the class. 
''' for k, v in kw.iteritems(): if not hasattr(cls, k): setattr(cls, k, v) def quote_define_name(path): fu = re.compile("[^a-zA-Z0-9]").sub("_", path) fu = fu.upper() return fu def quote_whitespace(path): return (path.strip().find(' ') > 0 and '"%s"' % path or path).replace('""', '"') def trimquotes(s): if not s: return '' s = s.rstrip() if s[0] == "'" and s[-1] == "'": return s[1:-1] return s def h_list(lst): m = md5() m.update(str(lst)) return m.digest() def h_fun(fun): try: return fun.code except AttributeError: try: h = inspect.getsource(fun) except IOError: h = "nocode" try: fun.code = h except AttributeError: pass return h def pprint(col, str, label='', sep='\n'): "print messages in color" sys.stderr.write("%s%s%s %s%s" % (Logs.colors(col), str, Logs.colors.NORMAL, label, sep)) def check_dir(dir): """If a folder doesn't exists, create it.""" try: os.lstat(dir) except OSError: try: os.makedirs(dir) except OSError, e: raise WafError("Cannot create folder '%s' (original error: %s)" % (dir, e)) def cmd_output(cmd, **kw): silent = False if 'silent' in kw: silent = kw['silent'] del(kw['silent']) if 'e' in kw: tmp = kw['e'] del(kw['e']) kw['env'] = tmp kw['shell'] = isinstance(cmd, str) kw['stdout'] = pproc.PIPE if silent: kw['stderr'] = pproc.PIPE try: p = pproc.Popen(cmd, **kw) output = p.communicate()[0] except OSError, e: raise ValueError(str(e)) if p.returncode: if not silent: msg = "command execution failed: %s -> %r" % (cmd, str(output)) raise ValueError(msg) output = '' return output reg_subst = re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}") def subst_vars(expr, params): "substitute ${PREFIX}/bin in /usr/local/bin" def repl_var(m): if m.group(1): return '\\' if m.group(2): return '$' try: # environments may contain lists return params.get_flat(m.group(3)) except AttributeError: return params[m.group(3)] return reg_subst.sub(repl_var, expr) def unversioned_sys_platform_to_binary_format(unversioned_sys_platform): "infers the binary format from the 
unversioned_sys_platform name." if unversioned_sys_platform in ('linux', 'freebsd', 'netbsd', 'openbsd', 'sunos', 'gnu'): return 'elf' elif unversioned_sys_platform == 'darwin': return 'mac-o' elif unversioned_sys_platform in ('win32', 'cygwin', 'uwin', 'msys'): return 'pe' # TODO we assume all other operating systems are elf, which is not true. # we may set this to 'unknown' and have ccroot and other tools handle the case "gracefully" (whatever that means). return 'elf' def unversioned_sys_platform(): """returns an unversioned name from sys.platform. sys.plaform is not very well defined and depends directly on the python source tree. The version appended to the names is unreliable as it's taken from the build environment at the time python was built, i.e., it's possible to get freebsd7 on a freebsd8 system. So we remove the version from the name, except for special cases where the os has a stupid name like os2 or win32. Some possible values of sys.platform are, amongst others: aix3 aix4 atheos beos5 darwin freebsd2 freebsd3 freebsd4 freebsd5 freebsd6 freebsd7 generic gnu0 irix5 irix6 linux2 mac netbsd1 next3 os2emx riscos sunos5 unixware7 Investigating the python source tree may reveal more values. """ s = sys.platform if s == 'java': # The real OS is hidden under the JVM. from java.lang import System s = System.getProperty('os.name') # see http://lopica.sourceforge.net/os.html for a list of possible values if s == 'Mac OS X': return 'darwin' elif s.startswith('Windows '): return 'win32' elif s == 'OS/2': return 'os2' elif s == 'HP-UX': return 'hpux' elif s in ('SunOS', 'Solaris'): return 'sunos' else: s = s.lower() if s == 'win32' or s.endswith('os2') and s != 'sunos2': return s return re.split('\d+$', s)[0] #@deprecated('use unversioned_sys_platform instead') def detect_platform(): """this function has been in the Utils module for some time. It's hard to guess what people have used it for. 
It seems its goal is to return an unversionned sys.platform, but it's not handling all platforms. For example, the version is not removed on freebsd and netbsd, amongst others. """ s = sys.platform # known POSIX for x in 'cygwin linux irix sunos hpux aix darwin gnu'.split(): # sys.platform may be linux2 if s.find(x) >= 0: return x # unknown POSIX if os.name in 'posix java os2'.split(): return os.name return s def load_tool(tool, tooldir=None): ''' load_tool: import a Python module, optionally using several directories. @param tool [string]: name of tool to import. @param tooldir [list]: directories to look for the tool. @return: the loaded module. Warning: this function is not thread-safe: plays with sys.path, so must run in sequence. ''' if tooldir: assert isinstance(tooldir, list) sys.path = tooldir + sys.path else: tooldir = [] try: return __import__(tool) finally: for dt in tooldir: sys.path.remove(dt) def readf(fname, m='r'): "get the contents of a file, it is not used anywhere for the moment" f = open(fname, m) try: txt = f.read() finally: f.close() return txt def nada(*k, **kw): """A function that does nothing""" pass def diff_path(top, subdir): """difference between two absolute paths""" top = os.path.normpath(top).replace('\\', '/').split('/') subdir = os.path.normpath(subdir).replace('\\', '/').split('/') if len(top) == len(subdir): return '' diff = subdir[len(top) - len(subdir):] return os.path.join(*diff) class Context(object): """A base class for commands to be executed from Waf scripts""" def set_curdir(self, dir): self.curdir_ = dir def get_curdir(self): try: return self.curdir_ except AttributeError: self.curdir_ = os.getcwd() return self.get_curdir() curdir = property(get_curdir, set_curdir) def recurse(self, dirs, name=''): """The function for calling scripts from folders, it tries to call wscript + function_name and if that file does not exist, it will call the method 'function_name' from a file named wscript the dirs can be a list of folders or 
a string containing space-separated folder paths """ if not name: name = inspect.stack()[1][3] if isinstance(dirs, str): dirs = to_list(dirs) for x in dirs: if os.path.isabs(x): nexdir = x else: nexdir = os.path.join(self.curdir, x) base = os.path.join(nexdir, WSCRIPT_FILE) file_path = base + '_' + name try: txt = readf(file_path, m='rU') except (OSError, IOError): try: module = load_module(base) except OSError: raise WscriptError('No such script %s' % base) try: f = module.__dict__[name] except KeyError: raise WscriptError('No function %s defined in %s' % (name, base)) if getattr(self.__class__, 'pre_recurse', None): self.pre_recurse(f, base, nexdir) old = self.curdir self.curdir = nexdir try: f(self) finally: self.curdir = old if getattr(self.__class__, 'post_recurse', None): self.post_recurse(module, base, nexdir) else: dc = {'ctx': self} if getattr(self.__class__, 'pre_recurse', None): dc = self.pre_recurse(txt, file_path, nexdir) old = self.curdir self.curdir = nexdir try: try: exec(compile(txt, file_path, 'exec'), dc) except Exception: exc_type, exc_value, tb = sys.exc_info() raise WscriptError("".join(traceback.format_exception(exc_type, exc_value, tb)), base) finally: self.curdir = old if getattr(self.__class__, 'post_recurse', None): self.post_recurse(txt, file_path, nexdir) if is_win32: old = shutil.copy2 def copy2(src, dst): old(src, dst) shutil.copystat(src, src) setattr(shutil, 'copy2', copy2) def zip_folder(dir, zip_file_name, prefix): """ prefix represents the app to add in the archive """ import zipfile zip = zipfile.ZipFile(zip_file_name, 'w', compression=zipfile.ZIP_DEFLATED) base = os.path.abspath(dir) if prefix: if prefix[-1] != os.sep: prefix += os.sep n = len(base) for root, dirs, files in os.walk(base): for f in files: archive_name = prefix + root[n:] + os.sep + f zip.write(root + os.sep + f, archive_name, zipfile.ZIP_DEFLATED) zip.close() def get_elapsed_time(start): "Format a time delta (datetime.timedelta) using the format DdHhMmS.MSs" 
delta = datetime.datetime.now() - start # cast to int necessary for python 3.0 days = int(delta.days) hours = int(delta.seconds / 3600) minutes = int((delta.seconds - hours * 3600) / 60) seconds = delta.seconds - hours * 3600 - minutes * 60 \ + float(delta.microseconds) / 1000 / 1000 result = '' if days: result += '%dd' % days if days or hours: result += '%dh' % hours if days or hours or minutes: result += '%dm' % minutes return '%s%.3fs' % (result, seconds) if os.name == 'java': # For Jython (they should really fix the inconsistency) try: gc.disable() gc.enable() except NotImplementedError: gc.disable = gc.enable def run_once(fun): """ decorator, make a function cache its results, use like this: @run_once def foo(k): return 345*2343 """ cache = {} def wrap(k): try: return cache[k] except KeyError: ret = fun(k) cache[k] = ret return ret wrap.__cache__ = cache return wrap ntdb-1.0/buildtools/wafadmin/__init__.py000066400000000000000000000001021224151530700203510ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005 (ita) ntdb-1.0/buildtools/wafadmin/ansiterm.py000066400000000000000000000175241224151530700204540ustar00rootroot00000000000000import sys, os try: if (not sys.stderr.isatty()) or (not sys.stdout.isatty()): raise ValueError('not a tty') from ctypes import * class COORD(Structure): _fields_ = [("X", c_short), ("Y", c_short)] class SMALL_RECT(Structure): _fields_ = [("Left", c_short), ("Top", c_short), ("Right", c_short), ("Bottom", c_short)] class CONSOLE_SCREEN_BUFFER_INFO(Structure): _fields_ = [("Size", COORD), ("CursorPosition", COORD), ("Attributes", c_short), ("Window", SMALL_RECT), ("MaximumWindowSize", COORD)] class CONSOLE_CURSOR_INFO(Structure): _fields_ = [('dwSize',c_ulong), ('bVisible', c_int)] sbinfo = CONSOLE_SCREEN_BUFFER_INFO() csinfo = CONSOLE_CURSOR_INFO() hconsole = windll.kernel32.GetStdHandle(-11) windll.kernel32.GetConsoleScreenBufferInfo(hconsole, byref(sbinfo)) if sbinfo.Size.X < 10 or sbinfo.Size.Y 
< 10: raise Exception('small console') windll.kernel32.GetConsoleCursorInfo(hconsole, byref(csinfo)) except Exception: pass else: import re, threading to_int = lambda number, default: number and int(number) or default wlock = threading.Lock() STD_OUTPUT_HANDLE = -11 STD_ERROR_HANDLE = -12 class AnsiTerm(object): def __init__(self): self.hconsole = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE) self.cursor_history = [] self.orig_sbinfo = CONSOLE_SCREEN_BUFFER_INFO() self.orig_csinfo = CONSOLE_CURSOR_INFO() windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(self.orig_sbinfo)) windll.kernel32.GetConsoleCursorInfo(hconsole, byref(self.orig_csinfo)) def screen_buffer_info(self): sbinfo = CONSOLE_SCREEN_BUFFER_INFO() windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(sbinfo)) return sbinfo def clear_line(self, param): mode = param and int(param) or 0 sbinfo = self.screen_buffer_info() if mode == 1: # Clear from begining of line to cursor position line_start = COORD(0, sbinfo.CursorPosition.Y) line_length = sbinfo.Size.X elif mode == 2: # Clear entire line line_start = COORD(sbinfo.CursorPosition.X, sbinfo.CursorPosition.Y) line_length = sbinfo.Size.X - sbinfo.CursorPosition.X else: # Clear from cursor position to end of line line_start = sbinfo.CursorPosition line_length = sbinfo.Size.X - sbinfo.CursorPosition.X chars_written = c_int() windll.kernel32.FillConsoleOutputCharacterA(self.hconsole, c_char(' '), line_length, line_start, byref(chars_written)) windll.kernel32.FillConsoleOutputAttribute(self.hconsole, sbinfo.Attributes, line_length, line_start, byref(chars_written)) def clear_screen(self, param): mode = to_int(param, 0) sbinfo = self.screen_buffer_info() if mode == 1: # Clear from begining of screen to cursor position clear_start = COORD(0, 0) clear_length = sbinfo.CursorPosition.X * sbinfo.CursorPosition.Y elif mode == 2: # Clear entire screen and return cursor to home clear_start = COORD(0, 0) clear_length = sbinfo.Size.X * 
sbinfo.Size.Y windll.kernel32.SetConsoleCursorPosition(self.hconsole, clear_start) else: # Clear from cursor position to end of screen clear_start = sbinfo.CursorPosition clear_length = ((sbinfo.Size.X - sbinfo.CursorPosition.X) + sbinfo.Size.X * (sbinfo.Size.Y - sbinfo.CursorPosition.Y)) chars_written = c_int() windll.kernel32.FillConsoleOutputCharacterA(self.hconsole, c_char(' '), clear_length, clear_start, byref(chars_written)) windll.kernel32.FillConsoleOutputAttribute(self.hconsole, sbinfo.Attributes, clear_length, clear_start, byref(chars_written)) def push_cursor(self, param): sbinfo = self.screen_buffer_info() self.cursor_history.push(sbinfo.CursorPosition) def pop_cursor(self, param): if self.cursor_history: old_pos = self.cursor_history.pop() windll.kernel32.SetConsoleCursorPosition(self.hconsole, old_pos) def set_cursor(self, param): x, sep, y = param.partition(';') x = to_int(x, 1) - 1 y = to_int(y, 1) - 1 sbinfo = self.screen_buffer_info() new_pos = COORD( min(max(0, x), sbinfo.Size.X), min(max(0, y), sbinfo.Size.Y) ) windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos) def set_column(self, param): x = to_int(param, 1) - 1 sbinfo = self.screen_buffer_info() new_pos = COORD( min(max(0, x), sbinfo.Size.X), sbinfo.CursorPosition.Y ) windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos) def move_cursor(self, x_offset=0, y_offset=0): sbinfo = self.screen_buffer_info() new_pos = COORD( min(max(0, sbinfo.CursorPosition.X + x_offset), sbinfo.Size.X), min(max(0, sbinfo.CursorPosition.Y + y_offset), sbinfo.Size.Y) ) windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos) def move_up(self, param): self.move_cursor(y_offset = -to_int(param, 1)) def move_down(self, param): self.move_cursor(y_offset = to_int(param, 1)) def move_left(self, param): self.move_cursor(x_offset = -to_int(param, 1)) def move_right(self, param): self.move_cursor(x_offset = to_int(param, 1)) def next_line(self, param): sbinfo = self.screen_buffer_info() 
self.move_cursor( x_offset = -sbinfo.CursorPosition.X, y_offset = to_int(param, 1) ) def prev_line(self, param): sbinfo = self.screen_buffer_info() self.move_cursor( x_offset = -sbinfo.CursorPosition.X, y_offset = -to_int(param, 1) ) escape_to_color = { (0, 30): 0x0, #black (0, 31): 0x4, #red (0, 32): 0x2, #green (0, 33): 0x4+0x2, #dark yellow (0, 34): 0x1, #blue (0, 35): 0x1+0x4, #purple (0, 36): 0x2+0x4, #cyan (0, 37): 0x1+0x2+0x4, #grey (1, 30): 0x1+0x2+0x4, #dark gray (1, 31): 0x4+0x8, #red (1, 32): 0x2+0x8, #light green (1, 33): 0x4+0x2+0x8, #yellow (1, 34): 0x1+0x8, #light blue (1, 35): 0x1+0x4+0x8, #light purple (1, 36): 0x1+0x2+0x8, #light cyan (1, 37): 0x1+0x2+0x4+0x8, #white } def set_color(self, param): cols = param.split(';') attr = self.orig_sbinfo.Attributes for c in cols: c = to_int(c, 0) if c in range(30,38): attr = (attr & 0xf0) | (self.escape_to_color.get((0,c), 0x7)) elif c in range(40,48): attr = (attr & 0x0f) | (self.escape_to_color.get((0,c), 0x7) << 8) elif c in range(90,98): attr = (attr & 0xf0) | (self.escape_to_color.get((1,c-60), 0x7)) elif c in range(100,108): attr = (attr & 0x0f) | (self.escape_to_color.get((1,c-60), 0x7) << 8) elif c == 1: attr |= 0x08 windll.kernel32.SetConsoleTextAttribute(self.hconsole, attr) def show_cursor(self,param): csinfo.bVisible = 1 windll.kernel32.SetConsoleCursorInfo(self.hconsole, byref(csinfo)) def hide_cursor(self,param): csinfo.bVisible = 0 windll.kernel32.SetConsoleCursorInfo(self.hconsole, byref(csinfo)) ansi_command_table = { 'A': move_up, 'B': move_down, 'C': move_right, 'D': move_left, 'E': next_line, 'F': prev_line, 'G': set_column, 'H': set_cursor, 'f': set_cursor, 'J': clear_screen, 'K': clear_line, 'h': show_cursor, 'l': hide_cursor, 'm': set_color, 's': push_cursor, 'u': pop_cursor, } # Match either the escape sequence or text not containing escape sequence ansi_tokans = re.compile('(?:\x1b\[([0-9?;]*)([a-zA-Z])|([^\x1b]+))') def write(self, text): try: wlock.acquire() for param, cmd, txt in 
self.ansi_tokans.findall(text): if cmd: cmd_func = self.ansi_command_table.get(cmd) if cmd_func: cmd_func(self, param) else: chars_written = c_int() if isinstance(txt, unicode): windll.kernel32.WriteConsoleW(self.hconsole, txt, len(txt), byref(chars_written), None) else: windll.kernel32.WriteConsoleA(self.hconsole, txt, len(txt), byref(chars_written), None) finally: wlock.release() def flush(self): pass def isatty(self): return True sys.stderr = sys.stdout = AnsiTerm() os.environ['TERM'] = 'vt100' ntdb-1.0/buildtools/wafadmin/pproc.py000066400000000000000000000510371224151530700177520ustar00rootroot00000000000000# borrowed from python 2.5.2c1 # Copyright (c) 2003-2005 by Peter Astrand # Licensed to PSF under a Contributor Agreement. import sys mswindows = (sys.platform == "win32") import os import types import traceback import gc class CalledProcessError(Exception): def __init__(self, returncode, cmd): self.returncode = returncode self.cmd = cmd def __str__(self): return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode) if mswindows: import threading import msvcrt if 0: import pywintypes from win32api import GetStdHandle, STD_INPUT_HANDLE, \ STD_OUTPUT_HANDLE, STD_ERROR_HANDLE from win32api import GetCurrentProcess, DuplicateHandle, \ GetModuleFileName, GetVersion from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE from win32pipe import CreatePipe from win32process import CreateProcess, STARTUPINFO, \ GetExitCodeProcess, STARTF_USESTDHANDLES, \ STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0 else: from _subprocess import * class STARTUPINFO: dwFlags = 0 hStdInput = None hStdOutput = None hStdError = None wShowWindow = 0 class pywintypes: error = IOError else: import select import errno import fcntl import pickle __all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"] try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 try: False except 
NameError: False = 0 True = 1 _active = [] def _cleanup(): for inst in _active[:]: if inst.poll(_deadstate=sys.maxint) >= 0: try: _active.remove(inst) except ValueError: pass PIPE = -1 STDOUT = -2 def call(*popenargs, **kwargs): return Popen(*popenargs, **kwargs).wait() def check_call(*popenargs, **kwargs): retcode = call(*popenargs, **kwargs) cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] if retcode: raise CalledProcessError(retcode, cmd) return retcode def list2cmdline(seq): result = [] needquote = False for arg in seq: bs_buf = [] if result: result.append(' ') needquote = (" " in arg) or ("\t" in arg) or arg == "" if needquote: result.append('"') for c in arg: if c == '\\': bs_buf.append(c) elif c == '"': result.append('\\' * len(bs_buf)*2) bs_buf = [] result.append('\\"') else: if bs_buf: result.extend(bs_buf) bs_buf = [] result.append(c) if bs_buf: result.extend(bs_buf) if needquote: result.extend(bs_buf) result.append('"') return ''.join(result) class Popen(object): def __init__(self, args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0): _cleanup() self._child_created = False if not isinstance(bufsize, (int, long)): raise TypeError("bufsize must be an integer") if mswindows: if preexec_fn is not None: raise ValueError("preexec_fn is not supported on Windows platforms") if close_fds: raise ValueError("close_fds is not supported on Windows platforms") else: if startupinfo is not None: raise ValueError("startupinfo is only supported on Windows platforms") if creationflags != 0: raise ValueError("creationflags is only supported on Windows platforms") self.stdin = None self.stdout = None self.stderr = None self.pid = None self.returncode = None self.universal_newlines = universal_newlines (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) = self._get_handles(stdin, stdout, stderr) 
self._execute_child(args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) if mswindows: if stdin is None and p2cwrite is not None: os.close(p2cwrite) p2cwrite = None if stdout is None and c2pread is not None: os.close(c2pread) c2pread = None if stderr is None and errread is not None: os.close(errread) errread = None if p2cwrite: self.stdin = os.fdopen(p2cwrite, 'wb', bufsize) if c2pread: if universal_newlines: self.stdout = os.fdopen(c2pread, 'rU', bufsize) else: self.stdout = os.fdopen(c2pread, 'rb', bufsize) if errread: if universal_newlines: self.stderr = os.fdopen(errread, 'rU', bufsize) else: self.stderr = os.fdopen(errread, 'rb', bufsize) def _translate_newlines(self, data): data = data.replace("\r\n", "\n") data = data.replace("\r", "\n") return data def __del__(self, sys=sys): if not self._child_created: return self.poll(_deadstate=sys.maxint) if self.returncode is None and _active is not None: _active.append(self) def communicate(self, input=None): if [self.stdin, self.stdout, self.stderr].count(None) >= 2: stdout = None stderr = None if self.stdin: if input: self.stdin.write(input) self.stdin.close() elif self.stdout: stdout = self.stdout.read() elif self.stderr: stderr = self.stderr.read() self.wait() return (stdout, stderr) return self._communicate(input) if mswindows: def _get_handles(self, stdin, stdout, stderr): if stdin is None and stdout is None and stderr is None: return (None, None, None, None, None, None) p2cread, p2cwrite = None, None c2pread, c2pwrite = None, None errread, errwrite = None, None if stdin is None: p2cread = GetStdHandle(STD_INPUT_HANDLE) if p2cread is not None: pass elif stdin is None or stdin == PIPE: p2cread, p2cwrite = CreatePipe(None, 0) p2cwrite = p2cwrite.Detach() p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0) elif isinstance(stdin, int): p2cread = msvcrt.get_osfhandle(stdin) else: p2cread = 
msvcrt.get_osfhandle(stdin.fileno()) p2cread = self._make_inheritable(p2cread) if stdout is None: c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE) if c2pwrite is not None: pass elif stdout is None or stdout == PIPE: c2pread, c2pwrite = CreatePipe(None, 0) c2pread = c2pread.Detach() c2pread = msvcrt.open_osfhandle(c2pread, 0) elif isinstance(stdout, int): c2pwrite = msvcrt.get_osfhandle(stdout) else: c2pwrite = msvcrt.get_osfhandle(stdout.fileno()) c2pwrite = self._make_inheritable(c2pwrite) if stderr is None: errwrite = GetStdHandle(STD_ERROR_HANDLE) if errwrite is not None: pass elif stderr is None or stderr == PIPE: errread, errwrite = CreatePipe(None, 0) errread = errread.Detach() errread = msvcrt.open_osfhandle(errread, 0) elif stderr == STDOUT: errwrite = c2pwrite elif isinstance(stderr, int): errwrite = msvcrt.get_osfhandle(stderr) else: errwrite = msvcrt.get_osfhandle(stderr.fileno()) errwrite = self._make_inheritable(errwrite) return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) def _make_inheritable(self, handle): return DuplicateHandle(GetCurrentProcess(), handle, GetCurrentProcess(), 0, 1, DUPLICATE_SAME_ACCESS) def _find_w9xpopen(self): w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)), "w9xpopen.exe") if not os.path.exists(w9xpopen): w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix), "w9xpopen.exe") if not os.path.exists(w9xpopen): raise RuntimeError("Cannot locate w9xpopen.exe, which is needed for Popen to work with your shell or platform.") return w9xpopen def _execute_child(self, args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite): if not isinstance(args, types.StringTypes): args = list2cmdline(args) if startupinfo is None: startupinfo = STARTUPINFO() if None not in (p2cread, c2pwrite, errwrite): startupinfo.dwFlags |= STARTF_USESTDHANDLES startupinfo.hStdInput = p2cread startupinfo.hStdOutput = c2pwrite 
startupinfo.hStdError = errwrite if shell: startupinfo.dwFlags |= STARTF_USESHOWWINDOW startupinfo.wShowWindow = SW_HIDE comspec = os.environ.get("COMSPEC", "cmd.exe") args = comspec + " /c " + args if (GetVersion() >= 0x80000000L or os.path.basename(comspec).lower() == "command.com"): w9xpopen = self._find_w9xpopen() args = '"%s" %s' % (w9xpopen, args) creationflags |= CREATE_NEW_CONSOLE try: hp, ht, pid, tid = CreateProcess(executable, args, None, None, 1, creationflags, env, cwd, startupinfo) except pywintypes.error, e: raise WindowsError(*e.args) self._child_created = True self._handle = hp self.pid = pid ht.Close() if p2cread is not None: p2cread.Close() if c2pwrite is not None: c2pwrite.Close() if errwrite is not None: errwrite.Close() def poll(self, _deadstate=None): if self.returncode is None: if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0: self.returncode = GetExitCodeProcess(self._handle) return self.returncode def wait(self): if self.returncode is None: obj = WaitForSingleObject(self._handle, INFINITE) self.returncode = GetExitCodeProcess(self._handle) return self.returncode def _readerthread(self, fh, buffer): buffer.append(fh.read()) def _communicate(self, input): stdout = None stderr = None if self.stdout: stdout = [] stdout_thread = threading.Thread(target=self._readerthread, args=(self.stdout, stdout)) stdout_thread.setDaemon(True) stdout_thread.start() if self.stderr: stderr = [] stderr_thread = threading.Thread(target=self._readerthread, args=(self.stderr, stderr)) stderr_thread.setDaemon(True) stderr_thread.start() if self.stdin: if input is not None: self.stdin.write(input) self.stdin.close() if self.stdout: stdout_thread.join() if self.stderr: stderr_thread.join() if stdout is not None: stdout = stdout[0] if stderr is not None: stderr = stderr[0] if self.universal_newlines and hasattr(file, 'newlines'): if stdout: stdout = self._translate_newlines(stdout) if stderr: stderr = self._translate_newlines(stderr) self.wait() return (stdout, 
stderr) else: def _get_handles(self, stdin, stdout, stderr): p2cread, p2cwrite = None, None c2pread, c2pwrite = None, None errread, errwrite = None, None if stdin is None: pass elif stdin == PIPE: p2cread, p2cwrite = os.pipe() elif isinstance(stdin, int): p2cread = stdin else: p2cread = stdin.fileno() if stdout is None: pass elif stdout == PIPE: c2pread, c2pwrite = os.pipe() elif isinstance(stdout, int): c2pwrite = stdout else: c2pwrite = stdout.fileno() if stderr is None: pass elif stderr == PIPE: errread, errwrite = os.pipe() elif stderr == STDOUT: errwrite = c2pwrite elif isinstance(stderr, int): errwrite = stderr else: errwrite = stderr.fileno() return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) def _set_cloexec_flag(self, fd): try: cloexec_flag = fcntl.FD_CLOEXEC except AttributeError: cloexec_flag = 1 old = fcntl.fcntl(fd, fcntl.F_GETFD) fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag) def _close_fds(self, but): for i in xrange(3, MAXFD): if i == but: continue try: os.close(i) except: pass def _execute_child(self, args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite): if isinstance(args, types.StringTypes): args = [args] else: args = list(args) if shell: args = ["/bin/sh", "-c"] + args if executable is None: executable = args[0] errpipe_read, errpipe_write = os.pipe() self._set_cloexec_flag(errpipe_write) gc_was_enabled = gc.isenabled() gc.disable() try: self.pid = os.fork() except: if gc_was_enabled: gc.enable() raise self._child_created = True if self.pid == 0: try: if p2cwrite: os.close(p2cwrite) if c2pread: os.close(c2pread) if errread: os.close(errread) os.close(errpipe_read) if p2cread: os.dup2(p2cread, 0) if c2pwrite: os.dup2(c2pwrite, 1) if errwrite: os.dup2(errwrite, 2) if p2cread and p2cread not in (0,): os.close(p2cread) if c2pwrite and c2pwrite not in (p2cread, 1): os.close(c2pwrite) if errwrite and errwrite not in (p2cread, 
c2pwrite, 2): os.close(errwrite) if close_fds: self._close_fds(but=errpipe_write) if cwd is not None: os.chdir(cwd) if preexec_fn: apply(preexec_fn) if env is None: os.execvp(executable, args) else: os.execvpe(executable, args, env) except: exc_type, exc_value, tb = sys.exc_info() exc_lines = traceback.format_exception(exc_type, exc_value, tb) exc_value.child_traceback = ''.join(exc_lines) os.write(errpipe_write, pickle.dumps(exc_value)) os._exit(255) if gc_was_enabled: gc.enable() os.close(errpipe_write) if p2cread and p2cwrite: os.close(p2cread) if c2pwrite and c2pread: os.close(c2pwrite) if errwrite and errread: os.close(errwrite) data = os.read(errpipe_read, 1048576) os.close(errpipe_read) if data != "": os.waitpid(self.pid, 0) child_exception = pickle.loads(data) raise child_exception def _handle_exitstatus(self, sts): if os.WIFSIGNALED(sts): self.returncode = -os.WTERMSIG(sts) elif os.WIFEXITED(sts): self.returncode = os.WEXITSTATUS(sts) else: raise RuntimeError("Unknown child exit status!") def poll(self, _deadstate=None): if self.returncode is None: try: pid, sts = os.waitpid(self.pid, os.WNOHANG) if pid == self.pid: self._handle_exitstatus(sts) except os.error: if _deadstate is not None: self.returncode = _deadstate return self.returncode def wait(self): if self.returncode is None: pid, sts = os.waitpid(self.pid, 0) self._handle_exitstatus(sts) return self.returncode def _communicate(self, input): read_set = [] write_set = [] stdout = None stderr = None if self.stdin: self.stdin.flush() if input: write_set.append(self.stdin) else: self.stdin.close() if self.stdout: read_set.append(self.stdout) stdout = [] if self.stderr: read_set.append(self.stderr) stderr = [] input_offset = 0 while read_set or write_set: rlist, wlist, xlist = select.select(read_set, write_set, []) if self.stdin in wlist: bytes_written = os.write(self.stdin.fileno(), buffer(input, input_offset, 512)) input_offset += bytes_written if input_offset >= len(input): self.stdin.close() 
write_set.remove(self.stdin) if self.stdout in rlist: data = os.read(self.stdout.fileno(), 1024) if data == "": self.stdout.close() read_set.remove(self.stdout) stdout.append(data) if self.stderr in rlist: data = os.read(self.stderr.fileno(), 1024) if data == "": self.stderr.close() read_set.remove(self.stderr) stderr.append(data) if stdout is not None: stdout = ''.join(stdout) if stderr is not None: stderr = ''.join(stderr) if self.universal_newlines and hasattr(file, 'newlines'): if stdout: stdout = self._translate_newlines(stdout) if stderr: stderr = self._translate_newlines(stderr) self.wait() return (stdout, stderr) ntdb-1.0/buildtools/wafadmin/py3kfixes.py000066400000000000000000000074451224151530700205600ustar00rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2009 (ita) """ Fixes for py3k go here """ import os all_modifs = {} def modif(dir, name, fun): if name == '*': lst = [] for y in '. Tools 3rdparty'.split(): for x in os.listdir(os.path.join(dir, y)): if x.endswith('.py'): lst.append(y + os.sep + x) #lst = [y + os.sep + x for x in os.listdir(os.path.join(dir, y)) for y in '. 
Tools 3rdparty'.split() if x.endswith('.py')] for x in lst: modif(dir, x, fun) return filename = os.path.join(dir, name) f = open(filename, 'r') txt = f.read() f.close() txt = fun(txt) f = open(filename, 'w') f.write(txt) f.close() def subst(filename): def do_subst(fun): global all_modifs try: all_modifs[filename] += fun except KeyError: all_modifs[filename] = [fun] return fun return do_subst @subst('Constants.py') def r1(code): code = code.replace("'iluvcuteoverload'", "b'iluvcuteoverload'") code = code.replace("ABI=7", "ABI=37") return code @subst('Tools/ccroot.py') def r2(code): code = code.replace("p.stdin.write('\\n')", "p.stdin.write(b'\\n')") code = code.replace('p.communicate()[0]', 'p.communicate()[0].decode("utf-8")') return code @subst('Utils.py') def r3(code): code = code.replace("m.update(str(lst))", "m.update(str(lst).encode())") code = code.replace('p.communicate()[0]', 'p.communicate()[0].decode("utf-8")') return code @subst('ansiterm.py') def r33(code): code = code.replace('unicode', 'str') return code @subst('Task.py') def r4(code): code = code.replace("up(self.__class__.__name__)", "up(self.__class__.__name__.encode())") code = code.replace("up(self.env.variant())", "up(self.env.variant().encode())") code = code.replace("up(x.parent.abspath())", "up(x.parent.abspath().encode())") code = code.replace("up(x.name)", "up(x.name.encode())") code = code.replace('class TaskBase(object):\n\t__metaclass__=store_task_type', 'import binascii\n\nclass TaskBase(object, metaclass=store_task_type):') code = code.replace('keys=self.cstr_groups.keys()', 'keys=list(self.cstr_groups.keys())') code = code.replace("sig.encode('hex')", 'binascii.hexlify(sig)') code = code.replace("os.path.join(Options.cache_global,ssig)", "os.path.join(Options.cache_global,ssig.decode())") return code @subst('Build.py') def r5(code): code = code.replace("cPickle.dump(data,file,-1)", "cPickle.dump(data,file)") code = code.replace('for node in src_dir_node.childs.values():', 'for node 
in list(src_dir_node.childs.values()):') return code @subst('*') def r6(code): code = code.replace('xrange', 'range') code = code.replace('iteritems', 'items') code = code.replace('maxint', 'maxsize') code = code.replace('iterkeys', 'keys') code = code.replace('Error,e:', 'Error as e:') code = code.replace('Exception,e:', 'Exception as e:') return code @subst('TaskGen.py') def r7(code): code = code.replace('class task_gen(object):\n\t__metaclass__=register_obj', 'class task_gen(object, metaclass=register_obj):') return code @subst('Tools/python.py') def r8(code): code = code.replace('proc.communicate()[0]', 'proc.communicate()[0].decode("utf-8")') return code @subst('Tools/glib2.py') def r9(code): code = code.replace('f.write(c)', 'f.write(c.encode("utf-8"))') return code @subst('Tools/config_c.py') def r10(code): code = code.replace("key=kw['success']", "key=kw['success']\n\t\t\t\ttry:\n\t\t\t\t\tkey=key.decode('utf-8')\n\t\t\t\texcept:\n\t\t\t\t\tpass") code = code.replace('out=str(out)','out=out.decode("utf-8")') code = code.replace('err=str(err)','err=err.decode("utf-8")') return code @subst('Tools/d.py') def r11(code): code = code.replace('ret.strip()', 'ret.strip().decode("utf-8")') return code def fixdir(dir): global all_modifs for k in all_modifs: for v in all_modifs[k]: modif(os.path.join(dir, 'wafadmin'), k, v) #print('substitutions finished') ntdb-1.0/buildtools/wafsamba/000077500000000000000000000000001224151530700162425ustar00rootroot00000000000000ntdb-1.0/buildtools/wafsamba/README000066400000000000000000000003771224151530700171310ustar00rootroot00000000000000This is a set of waf 'tools' to help make building the Samba components easier, by having common functions in one place. 
This gives us a more consistent build, and ensures that our project rules are obeyed TODO: see http://wiki.samba.org/index.php/Waf ntdb-1.0/buildtools/wafsamba/__init__.py000066400000000000000000000000001224151530700203410ustar00rootroot00000000000000ntdb-1.0/buildtools/wafsamba/configure_file.py000066400000000000000000000024401224151530700215740ustar00rootroot00000000000000# handle substitution of variables in .in files import Build, sys, Logs from samba_utils import * def subst_at_vars(task): '''substiture @VAR@ style variables in a file''' env = task.env src = task.inputs[0].srcpath(env) tgt = task.outputs[0].bldpath(env) f = open(src, 'r') s = f.read() f.close() # split on the vars a = re.split('(@\w+@)', s) out = [] for v in a: if re.match('@\w+@', v): vname = v[1:-1] if not vname in task.env and vname.upper() in task.env: vname = vname.upper() if not vname in task.env: Logs.error("Unknown substitution %s in %s" % (v, task.name)) sys.exit(1) v = SUBST_VARS_RECURSIVE(task.env[vname], task.env) out.append(v) contents = ''.join(out) f = open(tgt, 'w') s = f.write(contents) f.close() return 0 def CONFIGURE_FILE(bld, in_file, **kwargs): '''configure file''' base=os.path.basename(in_file) t = bld.SAMBA_GENERATOR('INFILE_%s' % base, rule = subst_at_vars, source = in_file + '.in', target = in_file, vars = kwargs) Build.BuildContext.CONFIGURE_FILE = CONFIGURE_FILE ntdb-1.0/buildtools/wafsamba/gccdeps.py000066400000000000000000000064261224151530700202340ustar00rootroot00000000000000# encoding: utf-8 # Thomas Nagy, 2008-2010 (ita) """ Execute the tasks with gcc -MD, read the dependencies from the .d file and prepare the dependency calculation for the next run """ import os, re, threading import Task, Logs, Utils, preproc from TaskGen import before, after, feature lock = threading.Lock() preprocessor_flag = '-MD' @feature('cc') @before('apply_core') def add_mmd_cc(self): if self.env.get_flat('CCFLAGS').find(preprocessor_flag) < 0: self.env.append_value('CCFLAGS', 
preprocessor_flag) @feature('cxx') @before('apply_core') def add_mmd_cxx(self): if self.env.get_flat('CXXFLAGS').find(preprocessor_flag) < 0: self.env.append_value('CXXFLAGS', preprocessor_flag) def scan(self): "the scanner does not do anything initially" nodes = self.generator.bld.node_deps.get(self.unique_id(), []) names = [] return (nodes, names) re_o = re.compile("\.o$") re_src = re.compile("^(\.\.)[\\/](.*)$") def post_run(self): # The following code is executed by threads, it is not safe, so a lock is needed... if getattr(self, 'cached', None): return Task.Task.post_run(self) name = self.outputs[0].abspath(self.env) name = re_o.sub('.d', name) txt = Utils.readf(name) #os.unlink(name) txt = txt.replace('\\\n', '') lst = txt.strip().split(':') val = ":".join(lst[1:]) val = val.split() nodes = [] bld = self.generator.bld f = re.compile("^("+self.env.variant()+"|\.\.)[\\/](.*)$") for x in val: if os.path.isabs(x): if not preproc.go_absolute: continue lock.acquire() try: node = bld.root.find_resource(x) finally: lock.release() else: g = re.search(re_src, x) if g: x = g.group(2) lock.acquire() try: node = bld.bldnode.parent.find_resource(x) finally: lock.release() else: g = re.search(f, x) if g: x = g.group(2) lock.acquire() try: node = bld.srcnode.find_resource(x) finally: lock.release() if id(node) == id(self.inputs[0]): # ignore the source file, it is already in the dependencies # this way, successful config tests may be retrieved from the cache continue if not node: raise ValueError('could not find %r for %r' % (x, self)) else: nodes.append(node) Logs.debug('deps: real scanner for %s returned %s' % (str(self), str(nodes))) bld.node_deps[self.unique_id()] = nodes bld.raw_deps[self.unique_id()] = [] try: del self.cache_sig except: pass Task.Task.post_run(self) import Constants, Utils def sig_implicit_deps(self): try: return Task.Task.sig_implicit_deps(self) except Utils.WafError: return Constants.SIG_NIL for name in 'cc cxx'.split(): try: cls = 
Task.TaskBase.classes[name] except KeyError: pass else: cls.post_run = post_run cls.scan = scan cls.sig_implicit_deps = sig_implicit_deps ntdb-1.0/buildtools/wafsamba/generic_cc.py000066400000000000000000000034271224151530700207030ustar00rootroot00000000000000 # compiler definition for a generic C compiler # based on suncc.py from waf import os, optparse import Utils, Options, Configure import ccroot, ar from Configure import conftest from compiler_cc import c_compiler c_compiler['default'] = ['gcc', 'generic_cc'] c_compiler['hpux'] = ['gcc', 'generic_cc'] @conftest def find_generic_cc(conf): v = conf.env cc = None if v['CC']: cc = v['CC'] elif 'CC' in conf.environ: cc = conf.environ['CC'] if not cc: cc = conf.find_program('cc', var='CC') if not cc: conf.fatal('generic_cc was not found') cc = conf.cmd_to_list(cc) v['CC'] = cc v['CC_NAME'] = 'generic' @conftest def generic_cc_common_flags(conf): v = conf.env v['CC_SRC_F'] = '' v['CC_TGT_F'] = ['-c', '-o', ''] v['CPPPATH_ST'] = '-I%s' # template for adding include paths # linker if not v['LINK_CC']: v['LINK_CC'] = v['CC'] v['CCLNK_SRC_F'] = '' v['CCLNK_TGT_F'] = ['-o', ''] v['LIB_ST'] = '-l%s' # template for adding libs v['LIBPATH_ST'] = '-L%s' # template for adding libpaths v['STATICLIB_ST'] = '-l%s' v['STATICLIBPATH_ST'] = '-L%s' v['CCDEFINES_ST'] = '-D%s' # v['SONAME_ST'] = '-Wl,-h -Wl,%s' # v['SHLIB_MARKER'] = '-Bdynamic' # v['STATICLIB_MARKER'] = '-Bstatic' # program v['program_PATTERN'] = '%s' # shared library # v['shlib_CCFLAGS'] = ['-Kpic', '-DPIC'] # v['shlib_LINKFLAGS'] = ['-G'] v['shlib_PATTERN'] = 'lib%s.so' # static lib # v['staticlib_LINKFLAGS'] = ['-Bstatic'] # v['staticlib_PATTERN'] = 'lib%s.a' detect = ''' find_generic_cc find_cpp find_ar generic_cc_common_flags cc_load_tools cc_add_flags link_add_flags ''' ntdb-1.0/buildtools/wafsamba/hpuxcc.py000066400000000000000000000031301224151530700201030ustar00rootroot00000000000000# compiler definition for HPUX # based on suncc.py from waf import os, 
optparse, sys import Utils, Options, Configure import ccroot, ar from Configure import conftest import gcc @conftest def gcc_modifier_hpux(conf): v=conf.env v['CCFLAGS_DEBUG']=['-g'] v['CCFLAGS_RELEASE']=['-O2'] v['CC_SRC_F']='' v['CC_TGT_F']=['-c','-o',''] v['CPPPATH_ST']='-I%s' if not v['LINK_CC']:v['LINK_CC']=v['CC'] v['CCLNK_SRC_F']='' v['CCLNK_TGT_F']=['-o',''] v['LIB_ST']='-l%s' v['LIBPATH_ST']='-L%s' v['STATICLIB_ST']='-l%s' v['STATICLIBPATH_ST']='-L%s' v['RPATH_ST']='-Wl,-rpath,%s' v['CCDEFINES_ST']='-D%s' v['SONAME_ST']='-Wl,-h,%s' v['SHLIB_MARKER']=[] # v['STATICLIB_MARKER']='-Wl,-Bstatic' v['FULLSTATIC_MARKER']='-static' v['program_PATTERN']='%s' v['shlib_CCFLAGS']=['-fPIC','-DPIC'] v['shlib_LINKFLAGS']=['-shared'] v['shlib_PATTERN']='lib%s.sl' # v['staticlib_LINKFLAGS']=['-Wl,-Bstatic'] v['staticlib_PATTERN']='lib%s.a' gcc.gcc_modifier_hpux = gcc_modifier_hpux from TaskGen import feature, after @feature('cprogram', 'cshlib') @after('apply_link', 'apply_lib_vars', 'apply_obj_vars') def hpux_addfullpath(self): if sys.platform == 'hp-ux11': link = getattr(self, 'link_task', None) if link: lst = link.env.LINKFLAGS buf = [] for x in lst: if x.startswith('-L'): p2 = x[2:] if not os.path.isabs(p2): x = x[:2] + self.bld.srcnode.abspath(link.env) + "/../" + x[2:].lstrip('.') buf.append(x) link.env.LINKFLAGS = buf ntdb-1.0/buildtools/wafsamba/irixcc.py000066400000000000000000000037211224151530700201000ustar00rootroot00000000000000 # compiler definition for irix/MIPSpro cc compiler # based on suncc.py from waf import os, optparse import Utils, Options, Configure import ccroot, ar from Configure import conftest from compiler_cc import c_compiler c_compiler['irix'] = ['gcc', 'irixcc'] @conftest def find_irixcc(conf): v = conf.env cc = None if v['CC']: cc = v['CC'] elif 'CC' in conf.environ: cc = conf.environ['CC'] if not cc: cc = conf.find_program('cc', var='CC') if not cc: conf.fatal('irixcc was not found') cc = conf.cmd_to_list(cc) try: if Utils.cmd_output(cc + 
['-c99'] + ['-version']) != '': conf.fatal('irixcc %r was not found' % cc) except ValueError: conf.fatal('irixcc -v could not be executed') conf.env.append_unique('CCFLAGS', '-c99') v['CC'] = cc v['CC_NAME'] = 'irix' @conftest def irixcc_common_flags(conf): v = conf.env v['CC_SRC_F'] = '' v['CC_TGT_F'] = ['-c', '-o', ''] v['CPPPATH_ST'] = '-I%s' # template for adding include paths # linker if not v['LINK_CC']: v['LINK_CC'] = v['CC'] v['CCLNK_SRC_F'] = '' v['CCLNK_TGT_F'] = ['-o', ''] v['LIB_ST'] = '-l%s' # template for adding libs v['LIBPATH_ST'] = '-L%s' # template for adding libpaths v['STATICLIB_ST'] = '-l%s' v['STATICLIBPATH_ST'] = '-L%s' v['CCDEFINES_ST'] = '-D%s' # v['SONAME_ST'] = '-Wl,-h -Wl,%s' # v['SHLIB_MARKER'] = '-Bdynamic' # v['STATICLIB_MARKER'] = '-Bstatic' # program v['program_PATTERN'] = '%s' # shared library # v['shlib_CCFLAGS'] = ['-Kpic', '-DPIC'] # v['shlib_LINKFLAGS'] = ['-G'] v['shlib_PATTERN'] = 'lib%s.so' # static lib # v['staticlib_LINKFLAGS'] = ['-Bstatic'] # v['staticlib_PATTERN'] = 'lib%s.a' detect = ''' find_irixcc find_cpp find_ar irixcc_common_flags cc_load_tools cc_add_flags link_add_flags ''' ntdb-1.0/buildtools/wafsamba/nothreads.py000066400000000000000000000144561224151530700206150ustar00rootroot00000000000000# encoding: utf-8 # Thomas Nagy, 2005-2008 (ita) # this replaces the core of Runner.py in waf with a varient that works # on systems with completely broken threading (such as Python 2.5.x on # AIX). For simplicity we enable this when JOBS=1, which is triggered # by the compatibility makefile used for the waf build. 
That also ensures # this code is tested, as it means it is used in the build farm, and by # anyone using 'make' to build Samba with waf "Execute the tasks" import sys, random, time, threading, traceback, os try: from Queue import Queue except ImportError: from queue import Queue import Build, Utils, Logs, Options from Logs import debug, error from Constants import * GAP = 15 run_old = threading.Thread.run def run(*args, **kwargs): try: run_old(*args, **kwargs) except (KeyboardInterrupt, SystemExit): raise except: sys.excepthook(*sys.exc_info()) threading.Thread.run = run class TaskConsumer(object): consumers = 1 def process(tsk): m = tsk.master if m.stop: m.out.put(tsk) return try: tsk.generator.bld.printout(tsk.display()) if tsk.__class__.stat: ret = tsk.__class__.stat(tsk) # actual call to task's run() function else: ret = tsk.call_run() except Exception, e: tsk.err_msg = Utils.ex_stack() tsk.hasrun = EXCEPTION # TODO cleanup m.error_handler(tsk) m.out.put(tsk) return if ret: tsk.err_code = ret tsk.hasrun = CRASHED else: try: tsk.post_run() except Utils.WafError: pass except Exception: tsk.err_msg = Utils.ex_stack() tsk.hasrun = EXCEPTION else: tsk.hasrun = SUCCESS if tsk.hasrun != SUCCESS: m.error_handler(tsk) m.out.put(tsk) class Parallel(object): """ keep the consumer threads busy, and avoid consuming cpu cycles when no more tasks can be added (end of the build, etc) """ def __init__(self, bld, j=2): # number of consumers self.numjobs = j self.manager = bld.task_manager self.manager.current_group = 0 self.total = self.manager.total() # tasks waiting to be processed - IMPORTANT self.outstanding = [] self.maxjobs = MAXJOBS # tasks that are awaiting for another task to complete self.frozen = [] # tasks returned by the consumers self.out = Queue(0) self.count = 0 # tasks not in the producer area self.processed = 1 # progress indicator self.stop = False # error condition to stop the build self.error = False # error flag def get_next(self): "override this method to 
schedule the tasks in a particular order" if not self.outstanding: return None return self.outstanding.pop(0) def postpone(self, tsk): "override this method to schedule the tasks in a particular order" # TODO consider using a deque instead if random.randint(0, 1): self.frozen.insert(0, tsk) else: self.frozen.append(tsk) def refill_task_list(self): "called to set the next group of tasks" while self.count > self.numjobs + GAP or self.count >= self.maxjobs: self.get_out() while not self.outstanding: if self.count: self.get_out() if self.frozen: self.outstanding += self.frozen self.frozen = [] elif not self.count: (jobs, tmp) = self.manager.get_next_set() if jobs is not None: self.maxjobs = jobs if tmp: self.outstanding += tmp break def get_out(self): "the tasks that are put to execute are all collected using get_out" ret = self.out.get() self.manager.add_finished(ret) if not self.stop and getattr(ret, 'more_tasks', None): self.outstanding += ret.more_tasks self.total += len(ret.more_tasks) self.count -= 1 def error_handler(self, tsk): "by default, errors make the build stop (not thread safe so be careful)" if not Options.options.keep: self.stop = True self.error = True def start(self): "execute the tasks" while not self.stop: self.refill_task_list() # consider the next task tsk = self.get_next() if not tsk: if self.count: # tasks may add new ones after they are run continue else: # no tasks to run, no tasks running, time to exit break if tsk.hasrun: # if the task is marked as "run", just skip it self.processed += 1 self.manager.add_finished(tsk) continue try: st = tsk.runnable_status() except Exception, e: self.processed += 1 if self.stop and not Options.options.keep: tsk.hasrun = SKIPPED self.manager.add_finished(tsk) continue self.error_handler(tsk) self.manager.add_finished(tsk) tsk.hasrun = EXCEPTION tsk.err_msg = Utils.ex_stack() continue if st == ASK_LATER: self.postpone(tsk) elif st == SKIP_ME: self.processed += 1 tsk.hasrun = SKIPPED 
self.manager.add_finished(tsk) else: # run me: put the task in ready queue tsk.position = (self.processed, self.total) self.count += 1 self.processed += 1 tsk.master = self process(tsk) # self.count represents the tasks that have been made available to the consumer threads # collect all the tasks after an error else the message may be incomplete while self.error and self.count: self.get_out() #print loop assert (self.count == 0 or self.stop) # enable nothreads import Runner Runner.process = process Runner.Parallel = Parallel ntdb-1.0/buildtools/wafsamba/pkgconfig.py000066400000000000000000000044531224151530700205710ustar00rootroot00000000000000# handle substitution of variables in pc files import Build, sys, Logs from samba_utils import * def subst_at_vars(task): '''substiture @VAR@ style variables in a file''' src = task.inputs[0].srcpath(task.env) tgt = task.outputs[0].bldpath(task.env) f = open(src, 'r') s = f.read() f.close() # split on the vars a = re.split('(@\w+@)', s) out = [] done_var = {} back_sub = [ ('PREFIX', '${prefix}'), ('EXEC_PREFIX', '${exec_prefix}')] for v in a: if re.match('@\w+@', v): vname = v[1:-1] if not vname in task.env and vname.upper() in task.env: vname = vname.upper() if not vname in task.env: Logs.error("Unknown substitution %s in %s" % (v, task.name)) sys.exit(1) v = SUBST_VARS_RECURSIVE(task.env[vname], task.env) # now we back substitute the allowed pc vars for (b, m) in back_sub: s = task.env[b] if s == v[0:len(s)]: if not b in done_var: # we don't want to substitute the first usage done_var[b] = True else: v = m + v[len(s):] break out.append(v) contents = ''.join(out) f = open(tgt, 'w') s = f.write(contents) f.close() return 0 def PKG_CONFIG_FILES(bld, pc_files, vnum=None): '''install some pkg_config pc files''' dest = '${PKGCONFIGDIR}' dest = bld.EXPAND_VARIABLES(dest) for f in TO_LIST(pc_files): base=os.path.basename(f) t = bld.SAMBA_GENERATOR('PKGCONFIG_%s' % base, rule=subst_at_vars, source=f+'.in', target=f) 
bld.add_manual_dependency(bld.path.find_or_declare(f), bld.env['PREFIX']) t.vars = [] if t.env.RPATH_ON_INSTALL: t.env.LIB_RPATH = t.env.RPATH_ST % t.env.LIBDIR else: t.env.LIB_RPATH = '' if vnum: t.env.PACKAGE_VERSION = vnum for v in [ 'PREFIX', 'EXEC_PREFIX', 'LIB_RPATH' ]: t.vars.append(t.env[v]) bld.INSTALL_FILES(dest, f, flat=True, destname=base) Build.BuildContext.PKG_CONFIG_FILES = PKG_CONFIG_FILES ntdb-1.0/buildtools/wafsamba/samba3.py000066400000000000000000000123171224151530700177660ustar00rootroot00000000000000# a waf tool to add autoconf-like macros to the configure section # and for SAMBA_ macros for building libraries, binaries etc import Options, Build, os from optparse import SUPPRESS_HELP from samba_utils import os_path_relpath, TO_LIST from samba_autoconf import library_flags def SAMBA3_ADD_OPTION(opt, option, help=(), dest=None, default=True, with_name="with", without_name="without"): if default is None: default_str="auto" elif default == True: default_str="yes" elif default == False: default_str="no" else: default_str=str(default) if help == (): help = ("Build with %s support (default=%s)" % (option, default_str)) if dest is None: dest = "with_%s" % option.replace('-', '_') with_val = "--%s-%s" % (with_name, option) without_val = "--%s-%s" % (without_name, option) #FIXME: This is broken and will always default to "default" no matter if # --with or --without is chosen. 
opt.add_option(with_val, help=help, action="store_true", dest=dest, default=default) opt.add_option(without_val, help=SUPPRESS_HELP, action="store_false", dest=dest) Options.Handler.SAMBA3_ADD_OPTION = SAMBA3_ADD_OPTION def SAMBA3_IS_STATIC_MODULE(bld, module): '''Check whether module is in static list''' if module in bld.env['static_modules']: return True return False Build.BuildContext.SAMBA3_IS_STATIC_MODULE = SAMBA3_IS_STATIC_MODULE def SAMBA3_IS_SHARED_MODULE(bld, module): '''Check whether module is in shared list''' if module in bld.env['shared_modules']: return True return False Build.BuildContext.SAMBA3_IS_SHARED_MODULE = SAMBA3_IS_SHARED_MODULE def SAMBA3_IS_ENABLED_MODULE(bld, module): '''Check whether module is in either shared or static list ''' return SAMBA3_IS_STATIC_MODULE(bld, module) or SAMBA3_IS_SHARED_MODULE(bld, module) Build.BuildContext.SAMBA3_IS_ENABLED_MODULE = SAMBA3_IS_ENABLED_MODULE def s3_fix_kwargs(bld, kwargs): '''fix the build arguments for s3 build rules to include the necessary includes, subdir and cflags options ''' s3dir = os.path.join(bld.env.srcdir, 'source3') s3reldir = os_path_relpath(s3dir, bld.curdir) # the extra_includes list is relative to the source3 directory extra_includes = [ '.', 'include', 'lib', '../lib/tdb_compat' ] # local heimdal paths only included when USING_SYSTEM_KRB5 is not set if not bld.CONFIG_SET("USING_SYSTEM_KRB5"): extra_includes += [ '../source4/heimdal/lib/com_err', '../source4/heimdal/lib/krb5', '../source4/heimdal/lib/gssapi', '../source4/heimdal_build', '../bin/default/source4/heimdal/lib/asn1' ] if bld.CONFIG_SET('USING_SYSTEM_TDB'): (tdb_includes, tdb_ldflags, tdb_cpppath) = library_flags(bld, 'tdb') extra_includes += tdb_cpppath else: extra_includes += [ '../lib/tdb/include' ] if bld.CONFIG_SET('USING_SYSTEM_TEVENT'): (tevent_includes, tevent_ldflags, tevent_cpppath) = library_flags(bld, 'tevent') extra_includes += tevent_cpppath else: extra_includes += [ '../lib/tevent' ] if 
bld.CONFIG_SET('USING_SYSTEM_TALLOC'): (talloc_includes, talloc_ldflags, talloc_cpppath) = library_flags(bld, 'talloc') extra_includes += talloc_cpppath else: extra_includes += [ '../lib/talloc' ] if bld.CONFIG_SET('USING_SYSTEM_POPT'): (popt_includes, popt_ldflags, popt_cpppath) = library_flags(bld, 'popt') extra_includes += popt_cpppath else: extra_includes += [ '../lib/popt' ] if bld.CONFIG_SET('USING_SYSTEM_INIPARSER'): (iniparser_includes, iniparser_ldflags, iniparser_cpppath) = library_flags(bld, 'iniparser') extra_includes += iniparser_cpppath else: extra_includes += [ '../lib/iniparser' ] # s3 builds assume that they will have a bunch of extra include paths includes = [] for d in extra_includes: includes += [ os.path.join(s3reldir, d) ] # the rule may already have some includes listed if 'includes' in kwargs: includes += TO_LIST(kwargs['includes']) kwargs['includes'] = includes # these wrappers allow for mixing of S3 and S4 build rules in the one build def SAMBA3_LIBRARY(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_LIBRARY(name, *args, **kwargs) Build.BuildContext.SAMBA3_LIBRARY = SAMBA3_LIBRARY def SAMBA3_MODULE(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_MODULE(name, *args, **kwargs) Build.BuildContext.SAMBA3_MODULE = SAMBA3_MODULE def SAMBA3_SUBSYSTEM(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_SUBSYSTEM(name, *args, **kwargs) Build.BuildContext.SAMBA3_SUBSYSTEM = SAMBA3_SUBSYSTEM def SAMBA3_BINARY(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_BINARY(name, *args, **kwargs) Build.BuildContext.SAMBA3_BINARY = SAMBA3_BINARY def SAMBA3_PYTHON(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_PYTHON(name, *args, **kwargs) Build.BuildContext.SAMBA3_PYTHON = SAMBA3_PYTHON ntdb-1.0/buildtools/wafsamba/samba_abi.py000066400000000000000000000211401224151530700205100ustar00rootroot00000000000000# functions for handling ABI checking 
of libraries import Options, Utils, os, Logs, samba_utils, sys, Task, fnmatch, re, Build from TaskGen import feature, before, after # these type maps cope with platform specific names for common types # please add new type mappings into the list below abi_type_maps = { '_Bool' : 'bool', 'struct __va_list_tag *' : 'va_list' } version_key = lambda x: map(int, x.split(".")) def normalise_signature(sig): '''normalise a signature from gdb''' sig = sig.strip() sig = re.sub('^\$[0-9]+\s=\s\{(.+)\}$', r'\1', sig) sig = re.sub('^\$[0-9]+\s=\s\{(.+)\}(\s0x[0-9a-f]+\s<\w+>)+$', r'\1', sig) sig = re.sub('^\$[0-9]+\s=\s(0x[0-9a-f]+)\s?(<\w+>)?$', r'\1', sig) sig = re.sub('0x[0-9a-f]+', '0xXXXX', sig) sig = re.sub('", ', r'\1"', sig) for t in abi_type_maps: # we need to cope with non-word characters in mapped types m = t m = m.replace('*', '\*') if m[-1].isalnum() or m[-1] == '_': m += '\\b' if m[0].isalnum() or m[0] == '_': m = '\\b' + m sig = re.sub(m, abi_type_maps[t], sig) return sig def normalise_varargs(sig): '''cope with older versions of gdb''' sig = re.sub(',\s\.\.\.', '', sig) return sig def parse_sigs(sigs, abi_match): '''parse ABI signatures file''' abi_match = samba_utils.TO_LIST(abi_match) ret = {} a = sigs.split('\n') for s in a: if s.find(':') == -1: continue sa = s.split(':') if abi_match: matched = False negative = False for p in abi_match: if p[0] == '!' 
and fnmatch.fnmatch(sa[0], p[1:]): negative = True break elif fnmatch.fnmatch(sa[0], p): matched = True break if (not matched) and negative: continue Logs.debug("%s -> %s" % (sa[1], normalise_signature(sa[1]))) ret[sa[0]] = normalise_signature(sa[1]) return ret def save_sigs(sig_file, parsed_sigs): '''save ABI signatures to a file''' sigs = '' for s in sorted(parsed_sigs.keys()): sigs += '%s: %s\n' % (s, parsed_sigs[s]) return samba_utils.save_file(sig_file, sigs, create_dir=True) def abi_check_task(self): '''check if the ABI has changed''' abi_gen = self.ABI_GEN libpath = self.inputs[0].abspath(self.env) libname = os.path.basename(libpath) sigs = Utils.cmd_output([abi_gen, libpath]) parsed_sigs = parse_sigs(sigs, self.ABI_MATCH) sig_file = self.ABI_FILE old_sigs = samba_utils.load_file(sig_file) if old_sigs is None or Options.options.ABI_UPDATE: if not save_sigs(sig_file, parsed_sigs): raise Utils.WafError('Failed to save ABI file "%s"' % sig_file) Logs.warn('Generated ABI signatures %s' % sig_file) return parsed_old_sigs = parse_sigs(old_sigs, self.ABI_MATCH) # check all old sigs got_error = False for s in parsed_old_sigs: if not s in parsed_sigs: Logs.error('%s: symbol %s has been removed - please update major version\n\tsignature: %s' % ( libname, s, parsed_old_sigs[s])) got_error = True elif normalise_varargs(parsed_old_sigs[s]) != normalise_varargs(parsed_sigs[s]): Logs.error('%s: symbol %s has changed - please update major version\n\told_signature: %s\n\tnew_signature: %s' % ( libname, s, parsed_old_sigs[s], parsed_sigs[s])) got_error = True for s in parsed_sigs: if not s in parsed_old_sigs: Logs.error('%s: symbol %s has been added - please mark it _PRIVATE_ or update minor version\n\tsignature: %s' % ( libname, s, parsed_sigs[s])) got_error = True if got_error: raise Utils.WafError('ABI for %s has changed - please fix library version then build with --abi-update\nSee http://wiki.samba.org/index.php/Waf#ABI_Checking for more information\nIf you have not 
changed any ABI, and your platform always gives this error, please configure with --abi-check-disable to skip this check' % libname) t = Task.task_type_from_func('abi_check', abi_check_task, color='BLUE', ext_in='.bin') t.quiet = True # allow "waf --abi-check" to force re-checking the ABI if '--abi-check' in sys.argv: Task.always_run(t) @after('apply_link') @feature('abi_check') def abi_check(self): '''check that ABI matches saved signatures''' env = self.bld.env if not env.ABI_CHECK or self.abi_directory is None: return # if the platform doesn't support -fvisibility=hidden then the ABI # checks become fairly meaningless if not env.HAVE_VISIBILITY_ATTR: return topsrc = self.bld.srcnode.abspath() abi_gen = os.path.join(topsrc, 'buildtools/scripts/abi_gen.sh') abi_file = "%s/%s-%s.sigs" % (self.abi_directory, self.name, self.vnum) tsk = self.create_task('abi_check', self.link_task.outputs[0]) tsk.ABI_FILE = abi_file tsk.ABI_MATCH = self.abi_match tsk.ABI_GEN = abi_gen def abi_process_file(fname, version, symmap): '''process one ABI file, adding new symbols to the symmap''' f = open(fname, mode='r') for line in f: symname = line.split(":")[0] if not symname in symmap: symmap[symname] = version f.close() def abi_write_vscript(f, libname, current_version, versions, symmap, abi_match): """Write a vscript file for a library in --version-script format. 
:param f: File-like object to write to :param libname: Name of the library, uppercased :param current_version: Current version :param versions: Versions to consider :param symmap: Dictionary mapping symbols -> version :param abi_match: List of symbols considered to be public in the current version """ invmap = {} for s in symmap: invmap.setdefault(symmap[s], []).append(s) last_key = "" versions = sorted(versions, key=version_key) for k in versions: symver = "%s_%s" % (libname, k) if symver == current_version: break f.write("%s {\n" % symver) if k in sorted(invmap.keys()): f.write("\tglobal:\n") for s in invmap.get(k, []): f.write("\t\t%s;\n" % s); f.write("}%s;\n\n" % last_key) last_key = " %s" % symver f.write("%s {\n" % current_version) local_abi = filter(lambda x: x[0] == '!', abi_match) global_abi = filter(lambda x: x[0] != '!', abi_match) f.write("\tglobal:\n") if len(global_abi) > 0: for x in global_abi: f.write("\t\t%s;\n" % x) else: f.write("\t\t*;\n") if abi_match != ["*"]: f.write("\tlocal:\n") for x in local_abi: f.write("\t\t%s;\n" % x[1:]) if len(global_abi) > 0: f.write("\t\t*;\n") f.write("};\n") def abi_build_vscript(task): '''generate a vscript file for our public libraries''' tgt = task.outputs[0].bldpath(task.env) symmap = {} versions = [] for f in task.inputs: fname = f.abspath(task.env) basename = os.path.basename(fname) version = basename[len(task.env.LIBNAME)+1:-len(".sigs")] versions.append(version) abi_process_file(fname, version, symmap) f = open(tgt, mode='w') try: abi_write_vscript(f, task.env.LIBNAME, task.env.VERSION, versions, symmap, task.env.ABI_MATCH) finally: f.close() def ABI_VSCRIPT(bld, libname, abi_directory, version, vscript, abi_match=None): '''generate a vscript file for our public libraries''' if abi_directory: source = bld.path.ant_glob('%s/%s-[0-9]*.sigs' % (abi_directory, libname)) def abi_file_key(path): return version_key(path[:-len(".sigs")].rsplit("-")[-1]) source = sorted(source.split(), key=abi_file_key) else: 
source = '' libname = os.path.basename(libname) version = os.path.basename(version) libname = libname.replace("-", "_").replace("+","_").upper() version = version.replace("-", "_").replace("+","_").upper() t = bld.SAMBA_GENERATOR(vscript, rule=abi_build_vscript, source=source, group='vscripts', target=vscript) if abi_match is None: abi_match = ["*"] else: abi_match = samba_utils.TO_LIST(abi_match) t.env.ABI_MATCH = abi_match t.env.VERSION = version t.env.LIBNAME = libname t.vars = ['LIBNAME', 'VERSION', 'ABI_MATCH'] Build.BuildContext.ABI_VSCRIPT = ABI_VSCRIPT ntdb-1.0/buildtools/wafsamba/samba_autoconf.py000066400000000000000000000606751224151530700216130ustar00rootroot00000000000000# a waf tool to add autoconf-like macros to the configure section import Build, os, sys, Options, preproc, Logs import string from Configure import conf from samba_utils import * import samba_cross missing_headers = set() #################################################### # some autoconf like helpers, to make the transition # to waf a bit easier for those used to autoconf # m4 files @runonce @conf def DEFINE(conf, d, v, add_to_cflags=False, quote=False): '''define a config option''' conf.define(d, v, quote=quote) if add_to_cflags: conf.env.append_value('CCDEFINES', d + '=' + str(v)) def hlist_to_string(conf, headers=None): '''convert a headers list to a set of #include lines''' hdrs='' hlist = conf.env.hlist if headers: hlist = hlist[:] hlist.extend(TO_LIST(headers)) for h in hlist: hdrs += '#include <%s>\n' % h return hdrs @conf def COMPOUND_START(conf, msg): '''start a compound test''' def null_check_message_1(self,*k,**kw): return def null_check_message_2(self,*k,**kw): return v = getattr(conf.env, 'in_compound', []) if v != [] and v != 0: conf.env.in_compound = v + 1 return conf.check_message_1(msg) conf.saved_check_message_1 = conf.check_message_1 conf.check_message_1 = null_check_message_1 conf.saved_check_message_2 = conf.check_message_2 conf.check_message_2 = 
null_check_message_2 conf.env.in_compound = 1 @conf def COMPOUND_END(conf, result): '''start a compound test''' conf.env.in_compound -= 1 if conf.env.in_compound != 0: return conf.check_message_1 = conf.saved_check_message_1 conf.check_message_2 = conf.saved_check_message_2 p = conf.check_message_2 if result is True: p('ok') elif not result: p('not found', 'YELLOW') else: p(result) @feature('nolink') def nolink(self): '''using the nolink type in conf.check() allows us to avoid the link stage of a test, thus speeding it up for tests that where linking is not needed''' pass def CHECK_HEADER(conf, h, add_headers=False, lib=None): '''check for a header''' if h in missing_headers and lib is None: return False d = h.upper().replace('/', '_') d = d.replace('.', '_') d = d.replace('-', '_') d = 'HAVE_%s' % d if CONFIG_SET(conf, d): if add_headers: if not h in conf.env.hlist: conf.env.hlist.append(h) return True (ccflags, ldflags, cpppath) = library_flags(conf, lib) hdrs = hlist_to_string(conf, headers=h) if lib is None: lib = "" ret = conf.check(fragment='%s\nint main(void) { return 0; }' % hdrs, type='nolink', execute=0, ccflags=ccflags, includes=cpppath, uselib=lib.upper(), msg="Checking for header %s" % h) if not ret: missing_headers.add(h) return False conf.DEFINE(d, 1) if add_headers and not h in conf.env.hlist: conf.env.hlist.append(h) return ret @conf def CHECK_HEADERS(conf, headers, add_headers=False, together=False, lib=None): '''check for a list of headers when together==True, then the headers accumulate within this test. 
This is useful for interdependent headers ''' ret = True if not add_headers and together: saved_hlist = conf.env.hlist[:] set_add_headers = True else: set_add_headers = add_headers for hdr in TO_LIST(headers): if not CHECK_HEADER(conf, hdr, set_add_headers, lib=lib): ret = False if not add_headers and together: conf.env.hlist = saved_hlist return ret def header_list(conf, headers=None, lib=None): '''form a list of headers which exist, as a string''' hlist=[] if headers is not None: for h in TO_LIST(headers): if CHECK_HEADER(conf, h, add_headers=False, lib=lib): hlist.append(h) return hlist_to_string(conf, headers=hlist) @conf def CHECK_TYPE(conf, t, alternate=None, headers=None, define=None, lib=None, msg=None): '''check for a single type''' if define is None: define = 'HAVE_' + t.upper().replace(' ', '_') if msg is None: msg='Checking for %s' % t ret = CHECK_CODE(conf, '%s _x' % t, define, execute=False, headers=headers, local_include=False, msg=msg, lib=lib, link=False) if not ret and alternate: conf.DEFINE(t, alternate) return ret @conf def CHECK_TYPES(conf, list, headers=None, define=None, alternate=None, lib=None): '''check for a list of types''' ret = True for t in TO_LIST(list): if not CHECK_TYPE(conf, t, headers=headers, define=define, alternate=alternate, lib=lib): ret = False return ret @conf def CHECK_TYPE_IN(conf, t, headers=None, alternate=None, define=None): '''check for a single type with a header''' return CHECK_TYPE(conf, t, headers=headers, alternate=alternate, define=define) @conf def CHECK_VARIABLE(conf, v, define=None, always=False, headers=None, msg=None, lib=None): '''check for a variable declaration (or define)''' if define is None: define = 'HAVE_%s' % v.upper() if msg is None: msg="Checking for variable %s" % v return CHECK_CODE(conf, # we need to make sure the compiler doesn't # optimize it out... 
''' #ifndef %s void *_x; _x=(void *)&%s; return (int)_x; #endif return 0 ''' % (v, v), execute=False, link=False, msg=msg, local_include=False, lib=lib, headers=headers, define=define, always=always) @conf def CHECK_DECLS(conf, vars, reverse=False, headers=None, always=False): '''check a list of variable declarations, using the HAVE_DECL_xxx form of define When reverse==True then use HAVE_xxx_DECL instead of HAVE_DECL_xxx ''' ret = True for v in TO_LIST(vars): if not reverse: define='HAVE_DECL_%s' % v.upper() else: define='HAVE_%s_DECL' % v.upper() if not CHECK_VARIABLE(conf, v, define=define, headers=headers, msg='Checking for declaration of %s' % v, always=always): ret = False return ret def CHECK_FUNC(conf, f, link=True, lib=None, headers=None): '''check for a function''' define='HAVE_%s' % f.upper() ret = False conf.COMPOUND_START('Checking for %s' % f) if link is None or link: ret = CHECK_CODE(conf, # this is based on the autoconf strategy ''' #define %s __fake__%s #ifdef HAVE_LIMITS_H # include #else # include #endif #undef %s #if defined __stub_%s || defined __stub___%s #error "bad glibc stub" #endif extern char %s(); int main() { return %s(); } ''' % (f, f, f, f, f, f, f), execute=False, link=True, addmain=False, add_headers=False, define=define, local_include=False, lib=lib, headers=headers, msg='Checking for %s' % f) if not ret: ret = CHECK_CODE(conf, # it might be a macro # we need to make sure the compiler doesn't # optimize it out... 
'void *__x = (void *)%s; return (int)__x' % f, execute=False, link=True, addmain=True, add_headers=True, define=define, local_include=False, lib=lib, headers=headers, msg='Checking for macro %s' % f) if not ret and (link is None or not link): ret = CHECK_VARIABLE(conf, f, define=define, headers=headers, msg='Checking for declaration of %s' % f) conf.COMPOUND_END(ret) return ret @conf def CHECK_FUNCS(conf, list, link=True, lib=None, headers=None): '''check for a list of functions''' ret = True for f in TO_LIST(list): if not CHECK_FUNC(conf, f, link=link, lib=lib, headers=headers): ret = False return ret @conf def CHECK_SIZEOF(conf, vars, headers=None, define=None): '''check the size of a type''' ret = True for v in TO_LIST(vars): v_define = define if v_define is None: v_define = 'SIZEOF_%s' % v.upper().replace(' ', '_') if not CHECK_CODE(conf, 'printf("%%u", (unsigned)sizeof(%s))' % v, define=v_define, execute=True, define_ret=True, quote=False, headers=headers, local_include=False, msg="Checking size of %s" % v): ret = False return ret @conf def CHECK_VALUEOF(conf, v, headers=None, define=None): '''check the value of a variable/define''' ret = True v_define = define if v_define is None: v_define = 'VALUEOF_%s' % v.upper().replace(' ', '_') if CHECK_CODE(conf, 'printf("%%u", (unsigned)(%s))' % v, define=v_define, execute=True, define_ret=True, quote=False, headers=headers, local_include=False, msg="Checking value of %s" % v): return int(conf.env[v_define]) return None @conf def CHECK_CODE(conf, code, define, always=False, execute=False, addmain=True, add_headers=True, mandatory=False, headers=None, msg=None, cflags='', includes='# .', local_include=True, lib=None, link=True, define_ret=False, quote=False, on_target=True): '''check if some code compiles and/or runs''' if CONFIG_SET(conf, define): return True if headers is not None: CHECK_HEADERS(conf, headers=headers, lib=lib) if add_headers: hdrs = header_list(conf, headers=headers, lib=lib) else: hdrs = '' if 
execute: execute = 1 else: execute = 0 defs = conf.get_config_header() if addmain: fragment='%s\n%s\n int main(void) { %s; return 0; }\n' % (defs, hdrs, code) else: fragment='%s\n%s\n%s\n' % (defs, hdrs, code) if msg is None: msg="Checking for %s" % define cflags = TO_LIST(cflags) if local_include: cflags.append('-I%s' % conf.curdir) if not link: type='nolink' else: type='cprogram' uselib = TO_LIST(lib) (ccflags, ldflags, cpppath) = library_flags(conf, uselib) includes = TO_LIST(includes) includes.extend(cpppath) uselib = [l.upper() for l in uselib] cflags.extend(ccflags) if on_target: exec_args = conf.SAMBA_CROSS_ARGS(msg=msg) else: exec_args = [] conf.COMPOUND_START(msg) ret = conf.check(fragment=fragment, execute=execute, define_name = define, mandatory = mandatory, ccflags=cflags, ldflags=ldflags, includes=includes, uselib=uselib, type=type, msg=msg, quote=quote, exec_args=exec_args, define_ret=define_ret) if not ret and CONFIG_SET(conf, define): # sometimes conf.check() returns false, but it # sets the define. Maybe a waf bug? 
ret = True if ret: if not define_ret: conf.DEFINE(define, 1) conf.COMPOUND_END(True) else: conf.COMPOUND_END(conf.env[define]) return True if always: conf.DEFINE(define, 0) conf.COMPOUND_END(False) return False @conf def CHECK_STRUCTURE_MEMBER(conf, structname, member, always=False, define=None, headers=None): '''check for a structure member''' if define is None: define = 'HAVE_%s' % member.upper() return CHECK_CODE(conf, '%s s; void *_x; _x=(void *)&s.%s' % (structname, member), define, execute=False, link=False, always=always, headers=headers, local_include=False, msg="Checking for member %s in %s" % (member, structname)) @conf def CHECK_CFLAGS(conf, cflags, fragment='int main(void) { return 0; }\n'): '''check if the given cflags are accepted by the compiler ''' return conf.check(fragment=fragment, execute=0, type='nolink', ccflags=cflags, msg="Checking compiler accepts %s" % cflags) @conf def CHECK_LDFLAGS(conf, ldflags): '''check if the given ldflags are accepted by the linker ''' return conf.check(fragment='int main(void) { return 0; }\n', execute=0, ldflags=ldflags, msg="Checking linker accepts %s" % ldflags) @conf def CONFIG_GET(conf, option): '''return True if a configuration option was found''' if (option in conf.env): return conf.env[option] else: return None @conf def CONFIG_SET(conf, option): '''return True if a configuration option was found''' if option not in conf.env: return False v = conf.env[option] if v is None: return False if v == []: return False if v == (): return False return True Build.BuildContext.CONFIG_SET = CONFIG_SET Build.BuildContext.CONFIG_GET = CONFIG_GET def library_flags(self, libs): '''work out flags from pkg_config''' ccflags = [] ldflags = [] cpppath = [] for lib in TO_LIST(libs): # note that we do not add the -I and -L in here, as that is added by the waf # core. 
Adding it here would just change the order that it is put on the link line # which can cause system paths to be added before internal libraries extra_ccflags = TO_LIST(getattr(self.env, 'CCFLAGS_%s' % lib.upper(), [])) extra_ldflags = TO_LIST(getattr(self.env, 'LDFLAGS_%s' % lib.upper(), [])) extra_cpppath = TO_LIST(getattr(self.env, 'CPPPATH_%s' % lib.upper(), [])) ccflags.extend(extra_ccflags) ldflags.extend(extra_ldflags) cpppath.extend(extra_cpppath) if 'EXTRA_LDFLAGS' in self.env: ldflags.extend(self.env['EXTRA_LDFLAGS']) ccflags = unique_list(ccflags) ldflags = unique_list(ldflags) cpppath = unique_list(cpppath) return (ccflags, ldflags, cpppath) @conf def CHECK_LIB(conf, libs, mandatory=False, empty_decl=True, set_target=True, shlib=False): '''check if a set of libraries exist as system libraries returns the sublist of libs that do exist as a syslib or [] ''' fragment= ''' int foo() { int v = 2; return v*2; } ''' ret = [] liblist = TO_LIST(libs) for lib in liblist[:]: if GET_TARGET_TYPE(conf, lib) == 'SYSLIB': ret.append(lib) continue (ccflags, ldflags, cpppath) = library_flags(conf, lib) if shlib: res = conf.check(features='cc cshlib', fragment=fragment, lib=lib, uselib_store=lib, ccflags=ccflags, ldflags=ldflags, uselib=lib.upper()) else: res = conf.check(lib=lib, uselib_store=lib, ccflags=ccflags, ldflags=ldflags, uselib=lib.upper()) if not res: if mandatory: Logs.error("Mandatory library '%s' not found for functions '%s'" % (lib, list)) sys.exit(1) if empty_decl: # if it isn't a mandatory library, then remove it from dependency lists if set_target: SET_TARGET_TYPE(conf, lib, 'EMPTY') else: conf.define('HAVE_LIB%s' % lib.upper().replace('-','_'), 1) conf.env['LIB_' + lib.upper()] = lib if set_target: conf.SET_TARGET_TYPE(lib, 'SYSLIB') ret.append(lib) return ret @conf def CHECK_FUNCS_IN(conf, list, library, mandatory=False, checklibc=False, headers=None, link=True, empty_decl=True, set_target=True): """ check that the functions in 'list' are available in 
'library' if they are, then make that library available as a dependency if the library is not available and mandatory==True, then raise an error. If the library is not available and mandatory==False, then add the library to the list of dependencies to remove from build rules optionally check for the functions first in libc """ remaining = TO_LIST(list) liblist = TO_LIST(library) # check if some already found for f in remaining[:]: if CONFIG_SET(conf, 'HAVE_%s' % f.upper()): remaining.remove(f) # see if the functions are in libc if checklibc: for f in remaining[:]: if CHECK_FUNC(conf, f, link=True, headers=headers): remaining.remove(f) if remaining == []: for lib in liblist: if GET_TARGET_TYPE(conf, lib) != 'SYSLIB' and empty_decl: SET_TARGET_TYPE(conf, lib, 'EMPTY') return True checklist = conf.CHECK_LIB(liblist, empty_decl=empty_decl, set_target=set_target) for lib in liblist[:]: if not lib in checklist and mandatory: Logs.error("Mandatory library '%s' not found for functions '%s'" % (lib, list)) sys.exit(1) ret = True for f in remaining: if not CHECK_FUNC(conf, f, lib=' '.join(checklist), headers=headers, link=link): ret = False return ret @conf def IN_LAUNCH_DIR(conf): '''return True if this rule is being run from the launch directory''' return os.path.realpath(conf.curdir) == os.path.realpath(Options.launch_dir) Options.Handler.IN_LAUNCH_DIR = IN_LAUNCH_DIR @conf def SAMBA_CONFIG_H(conf, path=None): '''write out config.h in the right directory''' # we don't want to produce a config.h in places like lib/replace # when we are building projects that depend on lib/replace if not IN_LAUNCH_DIR(conf): return if Options.options.debug: conf.ADD_CFLAGS('-g', testflags=True) if Options.options.developer: # we add these here to ensure that -Wstrict-prototypes is not set during configure conf.ADD_CFLAGS('-Wall -g -Wshadow -Werror=strict-prototypes -Wstrict-prototypes -Werror=pointer-arith -Wpointer-arith -Wcast-align -Werror=write-strings -Wwrite-strings 
-Werror-implicit-function-declaration -Wformat=2 -Wno-format-y2k -Wmissing-prototypes -fno-common -Werror=address', testflags=True) conf.ADD_CFLAGS('-Wcast-qual', testflags=True) conf.env.DEVELOPER_MODE = True # This check is because for ldb_search(), a NULL format string # is not an error, but some compilers complain about that. if CHECK_CFLAGS(conf, ["-Werror=format", "-Wformat=2"], ''' int testformat(char *format, ...) __attribute__ ((format (__printf__, 1, 2))); int main(void) { testformat(0); return 0; } '''): if not 'EXTRA_CFLAGS' in conf.env: conf.env['EXTRA_CFLAGS'] = [] conf.env['EXTRA_CFLAGS'].extend(TO_LIST("-Werror=format")) if Options.options.picky_developer: conf.ADD_CFLAGS('-Werror', testflags=True) if Options.options.fatal_errors: conf.ADD_CFLAGS('-Wfatal-errors', testflags=True) if Options.options.pedantic: conf.ADD_CFLAGS('-W', testflags=True) if path is None: conf.write_config_header('config.h', top=True) else: conf.write_config_header(path) conf.SAMBA_CROSS_CHECK_COMPLETE() @conf def CONFIG_PATH(conf, name, default): '''setup a configurable path''' if not name in conf.env: if default[0] == '/': conf.env[name] = default else: conf.env[name] = conf.env['PREFIX'] + default @conf def ADD_CFLAGS(conf, flags, testflags=False): '''add some CFLAGS to the command line optionally set testflags to ensure all the flags work ''' if testflags: ok_flags=[] for f in flags.split(): if CHECK_CFLAGS(conf, f): ok_flags.append(f) flags = ok_flags if not 'EXTRA_CFLAGS' in conf.env: conf.env['EXTRA_CFLAGS'] = [] conf.env['EXTRA_CFLAGS'].extend(TO_LIST(flags)) @conf def ADD_LDFLAGS(conf, flags, testflags=False): '''add some LDFLAGS to the command line optionally set testflags to ensure all the flags work this will return the flags that are added, if any ''' if testflags: ok_flags=[] for f in flags.split(): if CHECK_LDFLAGS(conf, f): ok_flags.append(f) flags = ok_flags if not 'EXTRA_LDFLAGS' in conf.env: conf.env['EXTRA_LDFLAGS'] = [] 
conf.env['EXTRA_LDFLAGS'].extend(TO_LIST(flags)) return flags @conf def ADD_EXTRA_INCLUDES(conf, includes): '''add some extra include directories to all builds''' if not 'EXTRA_INCLUDES' in conf.env: conf.env['EXTRA_INCLUDES'] = [] conf.env['EXTRA_INCLUDES'].extend(TO_LIST(includes)) def CURRENT_CFLAGS(bld, target, cflags, hide_symbols=False): '''work out the current flags. local flags are added first''' if not 'EXTRA_CFLAGS' in bld.env: list = [] else: list = bld.env['EXTRA_CFLAGS']; ret = TO_LIST(cflags) ret.extend(list) if hide_symbols and bld.env.HAVE_VISIBILITY_ATTR: ret.append('-fvisibility=hidden') return ret @conf def CHECK_CC_ENV(conf): """trim whitespaces from 'CC'. The build farm sometimes puts a space at the start""" if os.environ.get('CC'): conf.env.CC = TO_LIST(os.environ.get('CC')) if len(conf.env.CC) == 1: # make for nicer logs if just a single command conf.env.CC = conf.env.CC[0] @conf def SETUP_CONFIGURE_CACHE(conf, enable): '''enable/disable cache of configure results''' if enable: # when -C is chosen, we will use a private cache and will # not look into system includes. This roughtly matches what # autoconf does with -C cache_path = os.path.join(conf.blddir, '.confcache') mkdir_p(cache_path) Options.cache_global = os.environ['WAFCACHE'] = cache_path else: # when -C is not chosen we will not cache configure checks # We set the recursion limit low to prevent waf from spending # a lot of time on the signatures of the files. 
Options.cache_global = os.environ['WAFCACHE'] = '' preproc.recursion_limit = 1 # in either case we don't need to scan system includes preproc.go_absolute = False @conf def SAMBA_CHECK_UNDEFINED_SYMBOL_FLAGS(conf): # we don't want any libraries or modules to rely on runtime # resolution of symbols if not sys.platform.startswith("openbsd"): conf.env.undefined_ldflags = conf.ADD_LDFLAGS('-Wl,-no-undefined', testflags=True) if not sys.platform.startswith("openbsd") and conf.env.undefined_ignore_ldflags == []: if conf.CHECK_LDFLAGS(['-undefined', 'dynamic_lookup']): conf.env.undefined_ignore_ldflags = ['-undefined', 'dynamic_lookup'] ntdb-1.0/buildtools/wafsamba/samba_autoproto.py000066400000000000000000000014261224151530700220160ustar00rootroot00000000000000# waf build tool for building automatic prototypes from C source import Build from samba_utils import * def SAMBA_AUTOPROTO(bld, header, source): '''rule for samba prototype generation''' bld.SET_BUILD_GROUP('prototypes') relpath = os_path_relpath(bld.curdir, bld.srcnode.abspath()) name = os.path.join(relpath, header) SET_TARGET_TYPE(bld, name, 'PROTOTYPE') t = bld( name = name, source = source, target = header, on_results=True, ext_out='.c', before ='cc', rule = '${PERL} "${SCRIPT}/mkproto.pl" --srcdir=.. --builddir=. --public=/dev/null --private="${TGT}" ${SRC}' ) t.env.SCRIPT = os.path.join(bld.srcnode.abspath(), 'source4/script') Build.BuildContext.SAMBA_AUTOPROTO = SAMBA_AUTOPROTO ntdb-1.0/buildtools/wafsamba/samba_bundled.py000066400000000000000000000206221224151530700213760ustar00rootroot00000000000000# functions to support bundled libraries from Configure import conf import sys, Logs from samba_utils import * def PRIVATE_NAME(bld, name, private_extension, private_library): '''possibly rename a library to include a bundled extension''' # we now use the same private name for libraries as the public name. 
# see http://git.samba.org/?p=tridge/junkcode.git;a=tree;f=shlib for a # demonstration that this is the right thing to do # also see http://lists.samba.org/archive/samba-technical/2011-January/075816.html return name def target_in_list(target, lst, default): for l in lst: if target == l: return True if '!' + target == l: return False if l == 'ALL': return True if l == 'NONE': return False return default def BUILTIN_LIBRARY(bld, name): '''return True if a library should be builtin instead of being built as a shared lib''' return target_in_list(name, bld.env.BUILTIN_LIBRARIES, False) Build.BuildContext.BUILTIN_LIBRARY = BUILTIN_LIBRARY def BUILTIN_DEFAULT(opt, builtins): '''set a comma separated default list of builtin libraries for this package''' if 'BUILTIN_LIBRARIES_DEFAULT' in Options.options: return Options.options['BUILTIN_LIBRARIES_DEFAULT'] = builtins Options.Handler.BUILTIN_DEFAULT = BUILTIN_DEFAULT def PRIVATE_EXTENSION_DEFAULT(opt, extension, noextension=''): '''set a default private library extension''' if 'PRIVATE_EXTENSION_DEFAULT' in Options.options: return Options.options['PRIVATE_EXTENSION_DEFAULT'] = extension Options.options['PRIVATE_EXTENSION_EXCEPTION'] = noextension Options.Handler.PRIVATE_EXTENSION_DEFAULT = PRIVATE_EXTENSION_DEFAULT def minimum_library_version(conf, libname, default): '''allow override of mininum system library version''' minlist = Options.options.MINIMUM_LIBRARY_VERSION if not minlist: return default for m in minlist.split(','): a = m.split(':') if len(a) != 2: Logs.error("Bad syntax for --minimum-library-version of %s" % m) sys.exit(1) if a[0] == libname: return a[1] return default @conf def LIB_MAY_BE_BUNDLED(conf, libname): return ('NONE' not in conf.env.BUNDLED_LIBS and '!%s' % libname not in conf.env.BUNDLED_LIBS) @conf def LIB_MUST_BE_BUNDLED(conf, libname): return ('ALL' in conf.env.BUNDLED_LIBS or libname in conf.env.BUNDLED_LIBS) @conf def LIB_MUST_BE_PRIVATE(conf, libname): return ('ALL' in conf.env.PRIVATE_LIBS or 
libname in conf.env.PRIVATE_LIBS) @conf def CHECK_PREREQUISITES(conf, prereqs): missing = [] for syslib in TO_LIST(prereqs): f = 'FOUND_SYSTEMLIB_%s' % syslib if not f in conf.env: missing.append(syslib) return missing @runonce @conf def CHECK_BUNDLED_SYSTEM_PKG(conf, libname, minversion='0.0.0', onlyif=None, implied_deps=None, pkg=None): '''check if a library is available as a system library. This only tries using pkg-config ''' if conf.LIB_MUST_BE_BUNDLED(libname): return False found = 'FOUND_SYSTEMLIB_%s' % libname if found in conf.env: return conf.env[found] # see if the library should only use a system version if another dependent # system version is found. That prevents possible use of mixed library # versions if onlyif: missing = conf.CHECK_PREREQUISITES(onlyif) if missing: if not conf.LIB_MAY_BE_BUNDLED(libname): Logs.error('ERROR: Use of system library %s depends on missing system library/libraries %r' % (libname, missing)) sys.exit(1) conf.env[found] = False return False minversion = minimum_library_version(conf, libname, minversion) msg = 'Checking for system %s' % libname if minversion != '0.0.0': msg += ' >= %s' % minversion if pkg is None: pkg = libname if conf.check_cfg(package=pkg, args='"%s >= %s" --cflags --libs' % (pkg, minversion), msg=msg, uselib_store=libname.upper()): conf.SET_TARGET_TYPE(libname, 'SYSLIB') conf.env[found] = True if implied_deps: conf.SET_SYSLIB_DEPS(libname, implied_deps) return True conf.env[found] = False if not conf.LIB_MAY_BE_BUNDLED(libname): Logs.error('ERROR: System library %s of version %s not found, and bundling disabled' % (libname, minversion)) sys.exit(1) return False @runonce @conf def CHECK_BUNDLED_SYSTEM(conf, libname, minversion='0.0.0', checkfunctions=None, headers=None, onlyif=None, implied_deps=None, require_headers=True): '''check if a library is available as a system library. 
this first tries via pkg-config, then if that fails tries by testing for a specified function in the specified lib ''' if conf.LIB_MUST_BE_BUNDLED(libname): return False found = 'FOUND_SYSTEMLIB_%s' % libname if found in conf.env: return conf.env[found] def check_functions_headers(): '''helper function for CHECK_BUNDLED_SYSTEM''' if checkfunctions is None: return True if require_headers and headers and not conf.CHECK_HEADERS(headers, lib=libname): return False return conf.CHECK_FUNCS_IN(checkfunctions, libname, headers=headers, empty_decl=False, set_target=False) # see if the library should only use a system version if another dependent # system version is found. That prevents possible use of mixed library # versions if onlyif: missing = conf.CHECK_PREREQUISITES(onlyif) if missing: if not conf.LIB_MAY_BE_BUNDLED(libname): Logs.error('ERROR: Use of system library %s depends on missing system library/libraries %r' % (libname, missing)) sys.exit(1) conf.env[found] = False return False minversion = minimum_library_version(conf, libname, minversion) msg = 'Checking for system %s' % libname if minversion != '0.0.0': msg += ' >= %s' % minversion # try pkgconfig first if (conf.check_cfg(package=libname, args='"%s >= %s" --cflags --libs' % (libname, minversion), msg=msg) and check_functions_headers()): conf.SET_TARGET_TYPE(libname, 'SYSLIB') conf.env[found] = True if implied_deps: conf.SET_SYSLIB_DEPS(libname, implied_deps) return True if checkfunctions is not None: if check_functions_headers(): conf.env[found] = True if implied_deps: conf.SET_SYSLIB_DEPS(libname, implied_deps) conf.SET_TARGET_TYPE(libname, 'SYSLIB') return True conf.env[found] = False if not conf.LIB_MAY_BE_BUNDLED(libname): Logs.error('ERROR: System library %s of version %s not found, and bundling disabled' % (libname, minversion)) sys.exit(1) return False def tuplize_version(version): return tuple([int(x) for x in version.split(".")]) @runonce @conf def CHECK_BUNDLED_SYSTEM_PYTHON(conf, libname, 
modulename, minversion='0.0.0'): '''check if a python module is available on the system and has the specified minimum version. ''' if conf.LIB_MUST_BE_BUNDLED(libname): return False # see if the library should only use a system version if another dependent # system version is found. That prevents possible use of mixed library # versions minversion = minimum_library_version(conf, libname, minversion) try: m = __import__(modulename) except ImportError: found = False else: try: version = m.__version__ except AttributeError: found = False else: found = tuplize_version(version) >= tuplize_version(minversion) if not found and not conf.LIB_MAY_BE_BUNDLED(libname): Logs.error('ERROR: Python module %s of version %s not found, and bundling disabled' % (libname, minversion)) sys.exit(1) return found def NONSHARED_BINARY(bld, name): '''return True if a binary should be built without non-system shared libs''' return target_in_list(name, bld.env.NONSHARED_BINARIES, False) Build.BuildContext.NONSHARED_BINARY = NONSHARED_BINARY ntdb-1.0/buildtools/wafsamba/samba_conftests.py000066400000000000000000000361441224151530700217770ustar00rootroot00000000000000# a set of config tests that use the samba_autoconf functions # to test for commonly needed configuration options import os, shutil, re import Build, Configure, Utils from Configure import conf from samba_utils import * def add_option(self, *k, **kw): '''syntax help: provide the "match" attribute to opt.add_option() so that folders can be added to specific config tests''' match = kw.get('match', []) if match: del kw['match'] opt = self.parser.add_option(*k, **kw) opt.match = match return opt Options.Handler.add_option = add_option @conf def check(self, *k, **kw): '''Override the waf defaults to inject --with-directory options''' if not 'env' in kw: kw['env'] = self.env.copy() # match the configuration test with speficic options, for example: # --with-libiconv -> Options.options.iconv_open -> "Checking for library iconv" 
additional_dirs = [] if 'msg' in kw: msg = kw['msg'] for x in Options.Handler.parser.parser.option_list: if getattr(x, 'match', None) and msg in x.match: d = getattr(Options.options, x.dest, '') if d: additional_dirs.append(d) # we add the additional dirs twice: once for the test data, and again if the compilation test suceeds below def add_options_dir(dirs, env): for x in dirs: if not x in env.CPPPATH: env.CPPPATH = [os.path.join(x, 'include')] + env.CPPPATH if not x in env.LIBPATH: env.LIBPATH = [os.path.join(x, 'lib')] + env.LIBPATH add_options_dir(additional_dirs, kw['env']) self.validate_c(kw) self.check_message_1(kw['msg']) ret = None try: ret = self.run_c_code(*k, **kw) except Configure.ConfigurationError, e: self.check_message_2(kw['errmsg'], 'YELLOW') if 'mandatory' in kw and kw['mandatory']: if Logs.verbose > 1: raise else: self.fatal('the configuration failed (see %r)' % self.log.name) else: kw['success'] = ret self.check_message_2(self.ret_msg(kw['okmsg'], kw)) # success! keep the CPPPATH/LIBPATH add_options_dir(additional_dirs, self.env) self.post_check(*k, **kw) if not kw.get('execute', False): return ret == 0 return ret @conf def CHECK_ICONV(conf, define='HAVE_NATIVE_ICONV'): '''check if the iconv library is installed optionally pass a define''' if conf.CHECK_FUNCS_IN('iconv_open', 'iconv', checklibc=True, headers='iconv.h'): conf.DEFINE(define, 1) return True return False @conf def CHECK_LARGEFILE(conf, define='HAVE_LARGEFILE'): '''see what we need for largefile support''' getconf_cflags = conf.CHECK_COMMAND(['getconf', 'LFS_CFLAGS']); if getconf_cflags is not False: if (conf.CHECK_CODE('return !(sizeof(off_t) >= 8)', define='WORKING_GETCONF_LFS_CFLAGS', execute=True, cflags=getconf_cflags, msg='Checking getconf large file support flags work')): conf.ADD_CFLAGS(getconf_cflags) getconf_cflags_list=TO_LIST(getconf_cflags) for flag in getconf_cflags_list: if flag[:2] == "-D": flag_split = flag[2:].split('=') if len(flag_split) == 1: 
conf.DEFINE(flag_split[0], '1') else: conf.DEFINE(flag_split[0], flag_split[1]) if conf.CHECK_CODE('return !(sizeof(off_t) >= 8)', define, execute=True, msg='Checking for large file support without additional flags'): return True if conf.CHECK_CODE('return !(sizeof(off_t) >= 8)', define, execute=True, cflags='-D_FILE_OFFSET_BITS=64', msg='Checking for -D_FILE_OFFSET_BITS=64'): conf.DEFINE('_FILE_OFFSET_BITS', 64) return True if conf.CHECK_CODE('return !(sizeof(off_t) >= 8)', define, execute=True, cflags='-D_LARGE_FILES', msg='Checking for -D_LARGE_FILES'): conf.DEFINE('_LARGE_FILES', 1) return True return False @conf def CHECK_C_PROTOTYPE(conf, function, prototype, define, headers=None, msg=None): '''verify that a C prototype matches the one on the current system''' if not conf.CHECK_DECLS(function, headers=headers): return False if not msg: msg = 'Checking C prototype for %s' % function return conf.CHECK_CODE('%s; void *_x = (void *)%s' % (prototype, function), define=define, local_include=False, headers=headers, link=False, execute=False, msg=msg) @conf def CHECK_CHARSET_EXISTS(conf, charset, outcharset='UCS-2LE', headers=None, define=None): '''check that a named charset is able to be used with iconv_open() for conversion to a target charset ''' msg = 'Checking if can we convert from %s to %s' % (charset, outcharset) if define is None: define = 'HAVE_CHARSET_%s' % charset.upper().replace('-','_') return conf.CHECK_CODE(''' iconv_t cd = iconv_open("%s", "%s"); if (cd == 0 || cd == (iconv_t)-1) return -1; ''' % (charset, outcharset), define=define, execute=True, msg=msg, lib='iconv', headers=headers) def find_config_dir(conf): '''find a directory to run tests in''' k = 0 while k < 10000: dir = os.path.join(conf.blddir, '.conf_check_%d' % k) try: shutil.rmtree(dir) except OSError: pass try: os.stat(dir) except: break k += 1 try: os.makedirs(dir) except: conf.fatal('cannot create a configuration test folder %r' % dir) try: os.stat(dir) except: conf.fatal('cannot use 
the configuration test folder %r' % dir) return dir @conf def CHECK_SHLIB_INTRASINC_NAME_FLAGS(conf, msg): ''' check if the waf default flags for setting the name of lib are ok ''' snip = ''' int foo(int v) { return v * 2; } ''' return conf.check(features='cc cshlib',vnum="1",fragment=snip,msg=msg) @conf def CHECK_NEED_LC(conf, msg): '''check if we need -lc''' dir = find_config_dir(conf) env = conf.env bdir = os.path.join(dir, 'testbuild2') if not os.path.exists(bdir): os.makedirs(bdir) subdir = os.path.join(dir, "liblctest") os.makedirs(subdir) dest = open(os.path.join(subdir, 'liblc1.c'), 'w') dest.write('#include \nint lib_func(void) { FILE *f = fopen("foo", "r");}\n') dest.close() bld = Build.BuildContext() bld.log = conf.log bld.all_envs.update(conf.all_envs) bld.all_envs['default'] = env bld.lst_variants = bld.all_envs.keys() bld.load_dirs(dir, bdir) bld.rescan(bld.srcnode) bld(features='cc cshlib', source='liblctest/liblc1.c', ldflags=conf.env['EXTRA_LDFLAGS'], target='liblc', name='liblc') try: bld.compile() conf.check_message(msg, '', True) return True except: conf.check_message(msg, '', False) return False @conf def CHECK_SHLIB_W_PYTHON(conf, msg): '''check if we need -undefined dynamic_lookup''' dir = find_config_dir(conf) env = conf.env snip = ''' #include #include #define environ (*_NSGetEnviron()) static PyObject *ldb_module = NULL; int foo(int v) { extern char **environ; environ[0] = 1; ldb_module = PyImport_ImportModule("ldb"); return v * 2; }''' return conf.check(features='cc cshlib',uselib='PYEMBED',fragment=snip,msg=msg) # this one is quite complex, and should probably be broken up # into several parts. 
I'd quite like to create a set of CHECK_COMPOUND() # functions that make writing complex compound tests like this much easier @conf def CHECK_LIBRARY_SUPPORT(conf, rpath=False, version_script=False, msg=None): '''see if the platform supports building libraries''' if msg is None: if rpath: msg = "rpath library support" else: msg = "building library support" dir = find_config_dir(conf) bdir = os.path.join(dir, 'testbuild') if not os.path.exists(bdir): os.makedirs(bdir) env = conf.env subdir = os.path.join(dir, "libdir") os.makedirs(subdir) dest = open(os.path.join(subdir, 'lib1.c'), 'w') dest.write('int lib_func(void) { return 42; }\n') dest.close() dest = open(os.path.join(dir, 'main.c'), 'w') dest.write('int main(void) {return !(lib_func() == 42);}\n') dest.close() bld = Build.BuildContext() bld.log = conf.log bld.all_envs.update(conf.all_envs) bld.all_envs['default'] = env bld.lst_variants = bld.all_envs.keys() bld.load_dirs(dir, bdir) bld.rescan(bld.srcnode) ldflags = [] if version_script: ldflags.append("-Wl,--version-script=%s/vscript" % bld.path.abspath()) dest = open(os.path.join(dir,'vscript'), 'w') dest.write('TEST_1.0A2 { global: *; };\n') dest.close() bld(features='cc cshlib', source='libdir/lib1.c', target='libdir/lib1', ldflags=ldflags, name='lib1') o = bld(features='cc cprogram', source='main.c', target='prog1', uselib_local='lib1') if rpath: o.rpath=os.path.join(bdir, 'default/libdir') # compile the program try: bld.compile() except: conf.check_message(msg, '', False) return False # path for execution lastprog = o.link_task.outputs[0].abspath(env) if not rpath: if 'LD_LIBRARY_PATH' in os.environ: old_ld_library_path = os.environ['LD_LIBRARY_PATH'] else: old_ld_library_path = None ADD_LD_LIBRARY_PATH(os.path.join(bdir, 'default/libdir')) # we need to run the program, try to get its result args = conf.SAMBA_CROSS_ARGS(msg=msg) proc = Utils.pproc.Popen([lastprog] + args, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE) (out, err) = proc.communicate() w 
= conf.log.write w(str(out)) w('\n') w(str(err)) w('\nreturncode %r\n' % proc.returncode) ret = (proc.returncode == 0) if not rpath: os.environ['LD_LIBRARY_PATH'] = old_ld_library_path or '' conf.check_message(msg, '', ret) return ret @conf def CHECK_PERL_MANPAGE(conf, msg=None, section=None): '''work out what extension perl uses for manpages''' if msg is None: if section: msg = "perl man%s extension" % section else: msg = "perl manpage generation" conf.check_message_1(msg) dir = find_config_dir(conf) bdir = os.path.join(dir, 'testbuild') if not os.path.exists(bdir): os.makedirs(bdir) dest = open(os.path.join(bdir, 'Makefile.PL'), 'w') dest.write(""" use ExtUtils::MakeMaker; WriteMakefile( 'NAME' => 'WafTest', 'EXE_FILES' => [ 'WafTest' ] ); """) dest.close() back = os.path.abspath('.') os.chdir(bdir) proc = Utils.pproc.Popen(['perl', 'Makefile.PL'], stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE) (out, err) = proc.communicate() os.chdir(back) ret = (proc.returncode == 0) if not ret: conf.check_message_2('not found', color='YELLOW') return if section: f = open(os.path.join(bdir,'Makefile'), 'r') man = f.read() f.close() m = re.search('MAN%sEXT\s+=\s+(\w+)' % section, man) if not m: conf.check_message_2('not found', color='YELLOW') return ext = m.group(1) conf.check_message_2(ext) return ext conf.check_message_2('ok') return True @conf def CHECK_COMMAND(conf, cmd, msg=None, define=None, on_target=True, boolean=False): '''run a command and return result''' if msg is None: msg = 'Checking %s' % ' '.join(cmd) conf.COMPOUND_START(msg) cmd = cmd[:] if on_target: cmd.extend(conf.SAMBA_CROSS_ARGS(msg=msg)) try: ret = Utils.cmd_output(cmd) except: conf.COMPOUND_END(False) return False if boolean: conf.COMPOUND_END('ok') if define: conf.DEFINE(define, '1') else: ret = ret.strip() conf.COMPOUND_END(ret) if define: conf.DEFINE(define, ret, quote=True) return ret @conf def CHECK_UNAME(conf): '''setup SYSTEM_UNAME_* defines''' ret = True for v in "sysname machine release 
version".split(): if not conf.CHECK_CODE(''' struct utsname n; if (uname(&n) == -1) return -1; printf("%%s", n.%s); ''' % v, define='SYSTEM_UNAME_%s' % v.upper(), execute=True, define_ret=True, quote=True, headers='sys/utsname.h', local_include=False, msg="Checking uname %s type" % v): ret = False return ret @conf def CHECK_INLINE(conf): '''check for the right value for inline''' conf.COMPOUND_START('Checking for inline') for i in ['inline', '__inline__', '__inline']: ret = conf.CHECK_CODE(''' typedef int foo_t; static %s foo_t static_foo () {return 0; } %s foo_t foo () {return 0; }''' % (i, i), define='INLINE_MACRO', addmain=False, link=False) if ret: if i != 'inline': conf.DEFINE('inline', i, quote=False) break if not ret: conf.COMPOUND_END(ret) else: conf.COMPOUND_END(i) return ret @conf def CHECK_XSLTPROC_MANPAGES(conf): '''check if xsltproc can run with the given stylesheets''' if not conf.CONFIG_SET('XSLTPROC'): conf.find_program('xsltproc', var='XSLTPROC') if not conf.CONFIG_SET('XSLTPROC'): return False s='http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl' conf.CHECK_COMMAND('%s --nonet %s 2> /dev/null' % (conf.env.XSLTPROC, s), msg='Checking for stylesheet %s' % s, define='XSLTPROC_MANPAGES', on_target=False, boolean=True) if not conf.CONFIG_SET('XSLTPROC_MANPAGES'): print "A local copy of the docbook.xsl wasn't found on your system" \ " consider installing package like docbook-xsl" ntdb-1.0/buildtools/wafsamba/samba_cross.py000066400000000000000000000103471224151530700211150ustar00rootroot00000000000000# functions for handling cross-compilation import Utils, Logs, sys, os, Options, re from Configure import conf real_Popen = None ANSWER_UNKNOWN = (254, "") ANSWER_FAIL = (255, "") ANSWER_OK = (0, "") cross_answers_incomplete = False def add_answer(ca_file, msg, answer): '''add an answer to a set of cross answers''' try: f = open(ca_file, 'a') except: Logs.error("Unable to open cross-answers file %s" % ca_file) sys.exit(1) if answer == 
ANSWER_OK: f.write('%s: OK\n' % msg) elif answer == ANSWER_UNKNOWN: f.write('%s: UNKNOWN\n' % msg) elif answer == ANSWER_FAIL: f.write('%s: FAIL\n' % msg) else: (retcode, retstring) = answer f.write('%s: (%d, "%s")' % (msg, retcode, retstring)) f.close() def cross_answer(ca_file, msg): '''return a (retcode,retstring) tuple from a answers file''' try: f = open(ca_file, 'r') except: add_answer(ca_file, msg, ANSWER_UNKNOWN) return ANSWER_UNKNOWN for line in f: line = line.strip() if line == '' or line[0] == '#': continue if line.find(':') != -1: a = line.split(':') thismsg = a[0].strip() if thismsg != msg: continue ans = a[1].strip() if ans == "OK" or ans == "YES": f.close() return ANSWER_OK elif ans == "UNKNOWN": f.close() return ANSWER_UNKNOWN elif ans == "FAIL" or ans == "NO": f.close() return ANSWER_FAIL elif ans[0] == '"': return (0, ans.strip('"')) elif ans[0] == "'": return (0, ans.strip("'")) else: m = re.match('\(\s*(-?\d+)\s*,\s*\"(.*)\"\s*\)', ans) if m: f.close() return (int(m.group(1)), m.group(2)) else: raise Utils.WafError("Bad answer format '%s' in %s" % (line, ca_file)) f.close() add_answer(ca_file, msg, ANSWER_UNKNOWN) return ANSWER_UNKNOWN class cross_Popen(Utils.pproc.Popen): '''cross-compilation wrapper for Popen''' def __init__(*k, **kw): (obj, args) = k if '--cross-execute' in args: # when --cross-execute is set, then change the arguments # to use the cross emulator i = args.index('--cross-execute') newargs = args[i+1].split() newargs.extend(args[0:i]) args = newargs elif '--cross-answers' in args: # when --cross-answers is set, then change the arguments # to use the cross answers if available i = args.index('--cross-answers') ca_file = args[i+1] msg = args[i+2] ans = cross_answer(ca_file, msg) if ans == ANSWER_UNKNOWN: global cross_answers_incomplete cross_answers_incomplete = True (retcode, retstring) = ans args = ['/bin/sh', '-c', "echo -n '%s'; exit %d" % (retstring, retcode)] real_Popen.__init__(*(obj, args), **kw) @conf def 
SAMBA_CROSS_ARGS(conf, msg=None): '''get exec_args to pass when running cross compiled binaries''' if not conf.env.CROSS_COMPILE: return [] global real_Popen if real_Popen is None: real_Popen = Utils.pproc.Popen Utils.pproc.Popen = cross_Popen ret = [] if conf.env.CROSS_EXECUTE: ret.extend(['--cross-execute', conf.env.CROSS_EXECUTE]) elif conf.env.CROSS_ANSWERS: if msg is None: raise Utils.WafError("Cannot have NULL msg in cross-answers") ret.extend(['--cross-answers', os.path.join(Options.launch_dir, conf.env.CROSS_ANSWERS), msg]) if ret == []: raise Utils.WafError("Cannot cross-compile without either --cross-execute or --cross-answers") return ret @conf def SAMBA_CROSS_CHECK_COMPLETE(conf): '''check if we have some unanswered questions''' global cross_answers_incomplete if conf.env.CROSS_COMPILE and cross_answers_incomplete: raise Utils.WafError("Cross answers file %s is incomplete" % conf.env.CROSS_ANSWERS) return True ntdb-1.0/buildtools/wafsamba/samba_deps.py000066400000000000000000001217551224151530700207250ustar00rootroot00000000000000# Samba automatic dependency handling and project rules import Build, os, sys, re, Environment, Logs, time from samba_utils import * from samba_autoconf import * from samba_bundled import BUILTIN_LIBRARY @conf def ADD_GLOBAL_DEPENDENCY(ctx, dep): '''add a dependency for all binaries and libraries''' if not 'GLOBAL_DEPENDENCIES' in ctx.env: ctx.env.GLOBAL_DEPENDENCIES = [] ctx.env.GLOBAL_DEPENDENCIES.append(dep) @conf def BREAK_CIRCULAR_LIBRARY_DEPENDENCIES(ctx): '''indicate that circular dependencies between libraries should be broken.''' ctx.env.ALLOW_CIRCULAR_LIB_DEPENDENCIES = True @conf def SET_SYSLIB_DEPS(conf, target, deps): '''setup some implied dependencies for a SYSLIB''' cache = LOCAL_CACHE(conf, 'SYSLIB_DEPS') cache[target] = deps def expand_subsystem_deps(bld): '''expand the reverse dependencies resulting from subsystem attributes of modules. 
This is walking over the complete list of declared subsystems, and expands the samba_deps_extended list for any module<->subsystem dependencies''' subsystem_list = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') targets = LOCAL_CACHE(bld, 'TARGET_TYPE') for subsystem_name in subsystem_list: bld.ASSERT(subsystem_name in targets, "Subsystem target %s not declared" % subsystem_name) type = targets[subsystem_name] if type == 'DISABLED' or type == 'EMPTY': continue # for example, # subsystem_name = dcerpc_server (a subsystem) # subsystem = dcerpc_server (a subsystem object) # module_name = rpc_epmapper (a module within the dcerpc_server subsystem) # module = rpc_epmapper (a module object within the dcerpc_server subsystem) subsystem = bld.name_to_obj(subsystem_name, bld.env) bld.ASSERT(subsystem is not None, "Unable to find subsystem %s" % subsystem_name) for d in subsystem_list[subsystem_name]: module_name = d['TARGET'] module_type = targets[module_name] if module_type in ['DISABLED', 'EMPTY']: continue bld.ASSERT(subsystem is not None, "Subsystem target %s for %s (%s) not found" % (subsystem_name, module_name, module_type)) if module_type in ['SUBSYSTEM']: # if a module is a plain object type (not a library) then the # subsystem it is part of needs to have it as a dependency, so targets # that depend on this subsystem get the modules of that subsystem subsystem.samba_deps_extended.append(module_name) subsystem.samba_deps_extended = unique_list(subsystem.samba_deps_extended) def build_dependencies(self): '''This builds the dependency list for a target. It runs after all the targets are declared The reason this is not just done in the SAMBA_*() rules is that we have no way of knowing the full dependency list for a target until we have all of the targets declared. 
''' if self.samba_type in ['LIBRARY', 'BINARY', 'PYTHON']: self.uselib = list(self.final_syslibs) self.uselib_local = list(self.final_libs) self.add_objects = list(self.final_objects) # extra link flags from pkg_config libs = self.final_syslibs.copy() (ccflags, ldflags, cpppath) = library_flags(self, list(libs)) new_ldflags = getattr(self, 'samba_ldflags', [])[:] new_ldflags.extend(ldflags) self.ldflags = new_ldflags if getattr(self, 'allow_undefined_symbols', False) and self.env.undefined_ldflags: for f in self.env.undefined_ldflags: self.ldflags.remove(f) if getattr(self, 'allow_undefined_symbols', False) and self.env.undefined_ignore_ldflags: for f in self.env.undefined_ignore_ldflags: self.ldflags.append(f) debug('deps: computed dependencies for target %s: uselib=%s uselib_local=%s add_objects=%s', self.sname, self.uselib, self.uselib_local, self.add_objects) if self.samba_type in ['SUBSYSTEM']: # this is needed for the ccflags of libs that come from pkg_config self.uselib = list(self.final_syslibs) self.uselib.extend(list(self.direct_syslibs)) for lib in self.final_libs: t = self.bld.name_to_obj(lib, self.bld.env) self.uselib.extend(list(t.final_syslibs)) self.uselib = unique_list(self.uselib) if getattr(self, 'uselib', None): up_list = [] for l in self.uselib: up_list.append(l.upper()) self.uselib = up_list def build_includes(self): '''This builds the right set of includes for a target. One tricky part of this is that the includes= attribute for a target needs to use paths which are relative to that targets declaration directory (which we can get at via t.path). The way this works is the includes list gets added as samba_includes in the main build task declaration. 
Then this function runs after all of the tasks are declared, and it processes the samba_includes attribute to produce a includes= attribute ''' if getattr(self, 'samba_includes', None) is None: return bld = self.bld inc_deps = includes_objects(bld, self, set(), {}) includes = [] # maybe add local includes if getattr(self, 'local_include', True) and getattr(self, 'local_include_first', True): includes.append('.') includes.extend(self.samba_includes_extended) if 'EXTRA_INCLUDES' in bld.env and getattr(self, 'global_include', True): includes.extend(bld.env['EXTRA_INCLUDES']) includes.append('#') inc_set = set() inc_abs = [] for d in inc_deps: t = bld.name_to_obj(d, bld.env) bld.ASSERT(t is not None, "Unable to find dependency %s for %s" % (d, self.sname)) inclist = getattr(t, 'samba_includes_extended', [])[:] if getattr(t, 'local_include', True): inclist.append('.') if inclist == []: continue tpath = t.samba_abspath for inc in inclist: npath = tpath + '/' + inc if not npath in inc_set: inc_abs.append(npath) inc_set.add(npath) mypath = self.path.abspath(bld.env) for inc in inc_abs: relpath = os_path_relpath(inc, mypath) includes.append(relpath) if getattr(self, 'local_include', True) and not getattr(self, 'local_include_first', True): includes.append('.') # now transform the includes list to be relative to the top directory # which is represented by '#' in waf. 
This allows waf to cache the # includes lists more efficiently includes_top = [] for i in includes: if i[0] == '#': # some are already top based includes_top.append(i) continue absinc = os.path.join(self.path.abspath(), i) relinc = os_path_relpath(absinc, self.bld.srcnode.abspath()) includes_top.append('#' + relinc) self.includes = unique_list(includes_top) debug('deps: includes for target %s: includes=%s', self.sname, self.includes) def add_init_functions(self): '''This builds the right set of init functions''' bld = self.bld subsystems = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') # cope with the separated object lists from BINARY and LIBRARY targets sname = self.sname if sname.endswith('.objlist'): sname = sname[0:-8] modules = [] if sname in subsystems: modules.append(sname) m = getattr(self, 'samba_modules', None) if m is not None: modules.extend(TO_LIST(m)) m = getattr(self, 'samba_subsystem', None) if m is not None: modules.append(m) sentinel = getattr(self, 'init_function_sentinel', 'NULL') targets = LOCAL_CACHE(bld, 'TARGET_TYPE') cflags = getattr(self, 'samba_cflags', [])[:] if modules == []: sname = sname.replace('-','_') sname = sname.replace('/','_') cflags.append('-DSTATIC_%s_MODULES=%s' % (sname, sentinel)) if sentinel == 'NULL': cflags.append('-DSTATIC_%s_MODULES_PROTO=' % sname) self.ccflags = cflags return for m in modules: bld.ASSERT(m in subsystems, "No init_function defined for module '%s' in target '%s'" % (m, self.sname)) init_fn_list = [] for d in subsystems[m]: if targets[d['TARGET']] != 'DISABLED': init_fn_list.append(d['INIT_FUNCTION']) if init_fn_list == []: cflags.append('-DSTATIC_%s_MODULES=%s' % (m, sentinel)) if sentinel == 'NULL': cflags.append('-DSTATIC_%s_MODULES_PROTO' % m) else: cflags.append('-DSTATIC_%s_MODULES=%s' % (m, ','.join(init_fn_list) + ',' + sentinel)) proto='' for f in init_fn_list: proto = proto + '_MODULE_PROTO(%s)' % f cflags.append('-DSTATIC_%s_MODULES_PROTO=%s' % (m, proto)) self.ccflags = cflags def 
check_duplicate_sources(bld, tgt_list): '''see if we are compiling the same source file more than once without an allow_duplicates attribute''' debug('deps: checking for duplicate sources') targets = LOCAL_CACHE(bld, 'TARGET_TYPE') ret = True global tstart for t in tgt_list: source_list = TO_LIST(getattr(t, 'source', '')) tpath = os.path.normpath(os_path_relpath(t.path.abspath(bld.env), t.env.BUILD_DIRECTORY + '/default')) obj_sources = set() for s in source_list: p = os.path.normpath(os.path.join(tpath, s)) if p in obj_sources: Logs.error("ERROR: source %s appears twice in target '%s'" % (p, t.sname)) sys.exit(1) obj_sources.add(p) t.samba_source_set = obj_sources subsystems = {} # build a list of targets that each source file is part of for t in tgt_list: sources = [] if not targets[t.sname] in [ 'LIBRARY', 'BINARY', 'PYTHON' ]: continue for obj in t.add_objects: t2 = t.bld.name_to_obj(obj, bld.env) source_set = getattr(t2, 'samba_source_set', set()) for s in source_set: if not s in subsystems: subsystems[s] = {} if not t.sname in subsystems[s]: subsystems[s][t.sname] = [] subsystems[s][t.sname].append(t2.sname) for s in subsystems: if len(subsystems[s]) > 1 and Options.options.SHOW_DUPLICATES: Logs.warn("WARNING: source %s is in more than one target: %s" % (s, subsystems[s].keys())) for tname in subsystems[s]: if len(subsystems[s][tname]) > 1: raise Utils.WafError("ERROR: source %s is in more than one subsystem of target '%s': %s" % (s, tname, subsystems[s][tname])) return ret def check_orphaned_targets(bld, tgt_list): '''check if any build targets are orphaned''' target_dict = LOCAL_CACHE(bld, 'TARGET_TYPE') debug('deps: checking for orphaned targets') for t in tgt_list: if getattr(t, 'samba_used', False): continue type = target_dict[t.sname] if not type in ['BINARY', 'LIBRARY', 'MODULE', 'ET', 'PYTHON']: if re.search('^PIDL_', t.sname) is None: Logs.warn("Target %s of type %s is unused by any other target" % (t.sname, type)) def check_group_ordering(bld, 
tgt_list): '''see if we have any dependencies that violate the group ordering It is an error for a target to depend on a target from a later build group ''' def group_name(g): tm = bld.task_manager return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0] for g in bld.task_manager.groups: gname = group_name(g) for t in g.tasks_gen: t.samba_group = gname grp_map = {} idx = 0 for g in bld.task_manager.groups: name = group_name(g) grp_map[name] = idx idx += 1 targets = LOCAL_CACHE(bld, 'TARGET_TYPE') ret = True for t in tgt_list: tdeps = getattr(t, 'add_objects', []) + getattr(t, 'uselib_local', []) for d in tdeps: t2 = bld.name_to_obj(d, bld.env) if t2 is None: continue map1 = grp_map[t.samba_group] map2 = grp_map[t2.samba_group] if map2 > map1: Logs.error("Target %r in build group %r depends on target %r from later build group %r" % ( t.sname, t.samba_group, t2.sname, t2.samba_group)) ret = False return ret def show_final_deps(bld, tgt_list): '''show the final dependencies for all targets''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') for t in tgt_list: if not targets[t.sname] in ['LIBRARY', 'BINARY', 'PYTHON', 'SUBSYSTEM']: continue debug('deps: final dependencies for target %s: uselib=%s uselib_local=%s add_objects=%s', t.sname, t.uselib, getattr(t, 'uselib_local', []), getattr(t, 'add_objects', [])) def add_samba_attributes(bld, tgt_list): '''ensure a target has a the required samba attributes''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') for t in tgt_list: if t.name != '': t.sname = t.name else: t.sname = t.target t.samba_type = targets[t.sname] t.samba_abspath = t.path.abspath(bld.env) t.samba_deps_extended = t.samba_deps[:] t.samba_includes_extended = TO_LIST(t.samba_includes)[:] t.ccflags = getattr(t, 'samba_cflags', '') def replace_grouping_libraries(bld, tgt_list): '''replace dependencies based on grouping libraries If a library is marked as a grouping library, then any target that depends on a subsystem that is part of that grouping library 
gets that dependency replaced with a dependency on the grouping library ''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') grouping = {} # find our list of grouping libraries, mapped from the subsystems they depend on for t in tgt_list: if not getattr(t, 'grouping_library', False): continue for dep in t.samba_deps_extended: bld.ASSERT(dep in targets, "grouping library target %s not declared in %s" % (dep, t.sname)) if targets[dep] == 'SUBSYSTEM': grouping[dep] = t.sname # now replace any dependencies on elements of grouping libraries for t in tgt_list: for i in range(len(t.samba_deps_extended)): dep = t.samba_deps_extended[i] if dep in grouping: if t.sname != grouping[dep]: debug("deps: target %s: replacing dependency %s with grouping library %s" % (t.sname, dep, grouping[dep])) t.samba_deps_extended[i] = grouping[dep] def build_direct_deps(bld, tgt_list): '''build the direct_objects and direct_libs sets for each target''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') syslib_deps = LOCAL_CACHE(bld, 'SYSLIB_DEPS') global_deps = bld.env.GLOBAL_DEPENDENCIES global_deps_exclude = set() for dep in global_deps: t = bld.name_to_obj(dep, bld.env) for d in t.samba_deps: # prevent loops from the global dependencies list global_deps_exclude.add(d) global_deps_exclude.add(d + '.objlist') for t in tgt_list: t.direct_objects = set() t.direct_libs = set() t.direct_syslibs = set() deps = t.samba_deps_extended[:] if getattr(t, 'samba_use_global_deps', False) and not t.sname in global_deps_exclude: deps.extend(global_deps) for d in deps: if d == t.sname: continue if not d in targets: Logs.error("Unknown dependency '%s' in '%s'" % (d, t.sname)) sys.exit(1) if targets[d] in [ 'EMPTY', 'DISABLED' ]: continue if targets[d] == 'PYTHON' and targets[t.sname] != 'PYTHON' and t.sname.find('.objlist') == -1: # this check should be more restrictive, but for now we have pidl-generated python # code that directly depends on other python modules Logs.error('ERROR: Target %s has dependency on python module 
%s' % (t.sname, d)) sys.exit(1) if targets[d] == 'SYSLIB': t.direct_syslibs.add(d) if d in syslib_deps: for implied in TO_LIST(syslib_deps[d]): if BUILTIN_LIBRARY(bld, implied): t.direct_objects.add(implied) elif targets[implied] == 'SYSLIB': t.direct_syslibs.add(implied) elif targets[implied] in ['LIBRARY', 'MODULE']: t.direct_libs.add(implied) else: Logs.error('Implied dependency %s in %s is of type %s' % ( implied, t.sname, targets[implied])) sys.exit(1) continue t2 = bld.name_to_obj(d, bld.env) if t2 is None: Logs.error("no task %s of type %s in %s" % (d, targets[d], t.sname)) sys.exit(1) if t2.samba_type in [ 'LIBRARY', 'MODULE' ]: t.direct_libs.add(d) elif t2.samba_type in [ 'SUBSYSTEM', 'ASN1', 'PYTHON' ]: t.direct_objects.add(d) debug('deps: built direct dependencies') def dependency_loop(loops, t, target): '''add a dependency loop to the loops dictionary''' if t.sname == target: return if not target in loops: loops[target] = set() if not t.sname in loops[target]: loops[target].add(t.sname) def indirect_libs(bld, t, chain, loops): '''recursively calculate the indirect library dependencies for a target An indirect library is a library that results from a dependency on a subsystem ''' ret = getattr(t, 'indirect_libs', None) if ret is not None: return ret ret = set() for obj in t.direct_objects: if obj in chain: dependency_loop(loops, t, obj) continue chain.add(obj) t2 = bld.name_to_obj(obj, bld.env) r2 = indirect_libs(bld, t2, chain, loops) chain.remove(obj) ret = ret.union(t2.direct_libs) ret = ret.union(r2) for obj in indirect_objects(bld, t, set(), loops): if obj in chain: dependency_loop(loops, t, obj) continue chain.add(obj) t2 = bld.name_to_obj(obj, bld.env) r2 = indirect_libs(bld, t2, chain, loops) chain.remove(obj) ret = ret.union(t2.direct_libs) ret = ret.union(r2) t.indirect_libs = ret return ret def indirect_objects(bld, t, chain, loops): '''recursively calculate the indirect object dependencies for a target indirect objects are the set of objects 
from expanding the subsystem dependencies ''' ret = getattr(t, 'indirect_objects', None) if ret is not None: return ret ret = set() for lib in t.direct_objects: if lib in chain: dependency_loop(loops, t, lib) continue chain.add(lib) t2 = bld.name_to_obj(lib, bld.env) r2 = indirect_objects(bld, t2, chain, loops) chain.remove(lib) ret = ret.union(t2.direct_objects) ret = ret.union(r2) t.indirect_objects = ret return ret def extended_objects(bld, t, chain): '''recursively calculate the extended object dependencies for a target extended objects are the union of: - direct objects - indirect objects - direct and indirect objects of all direct and indirect libraries ''' ret = getattr(t, 'extended_objects', None) if ret is not None: return ret ret = set() ret = ret.union(t.final_objects) for lib in t.final_libs: if lib in chain: continue t2 = bld.name_to_obj(lib, bld.env) chain.add(lib) r2 = extended_objects(bld, t2, chain) chain.remove(lib) ret = ret.union(t2.final_objects) ret = ret.union(r2) t.extended_objects = ret return ret def includes_objects(bld, t, chain, inc_loops): '''recursively calculate the includes object dependencies for a target includes dependencies come from either library or object dependencies ''' ret = getattr(t, 'includes_objects', None) if ret is not None: return ret ret = t.direct_objects.copy() ret = ret.union(t.direct_libs) for obj in t.direct_objects: if obj in chain: dependency_loop(inc_loops, t, obj) continue chain.add(obj) t2 = bld.name_to_obj(obj, bld.env) r2 = includes_objects(bld, t2, chain, inc_loops) chain.remove(obj) ret = ret.union(t2.direct_objects) ret = ret.union(r2) for lib in t.direct_libs: if lib in chain: dependency_loop(inc_loops, t, lib) continue chain.add(lib) t2 = bld.name_to_obj(lib, bld.env) if t2 is None: targets = LOCAL_CACHE(bld, 'TARGET_TYPE') Logs.error('Target %s of type %s not found in direct_libs for %s' % ( lib, targets[lib], t.sname)) sys.exit(1) r2 = includes_objects(bld, t2, chain, inc_loops) chain.remove(lib) 
ret = ret.union(t2.direct_objects) ret = ret.union(r2) t.includes_objects = ret return ret def break_dependency_loops(bld, tgt_list): '''find and break dependency loops''' loops = {} inc_loops = {} # build up the list of loops for t in tgt_list: indirect_objects(bld, t, set(), loops) indirect_libs(bld, t, set(), loops) includes_objects(bld, t, set(), inc_loops) # break the loops for t in tgt_list: if t.sname in loops: for attr in ['direct_objects', 'indirect_objects', 'direct_libs', 'indirect_libs']: objs = getattr(t, attr, set()) setattr(t, attr, objs.difference(loops[t.sname])) for loop in loops: debug('deps: Found dependency loops for target %s : %s', loop, loops[loop]) for loop in inc_loops: debug('deps: Found include loops for target %s : %s', loop, inc_loops[loop]) # expand the loops mapping by one level for loop in loops.copy(): for tgt in loops[loop]: if tgt in loops: loops[loop] = loops[loop].union(loops[tgt]) for loop in inc_loops.copy(): for tgt in inc_loops[loop]: if tgt in inc_loops: inc_loops[loop] = inc_loops[loop].union(inc_loops[tgt]) # expand indirect subsystem and library loops for loop in loops.copy(): t = bld.name_to_obj(loop, bld.env) if t.samba_type in ['SUBSYSTEM']: loops[loop] = loops[loop].union(t.indirect_objects) loops[loop] = loops[loop].union(t.direct_objects) if t.samba_type in ['LIBRARY','PYTHON']: loops[loop] = loops[loop].union(t.indirect_libs) loops[loop] = loops[loop].union(t.direct_libs) if loop in loops[loop]: loops[loop].remove(loop) # expand indirect includes loops for loop in inc_loops.copy(): t = bld.name_to_obj(loop, bld.env) inc_loops[loop] = inc_loops[loop].union(t.includes_objects) if loop in inc_loops[loop]: inc_loops[loop].remove(loop) # add in the replacement dependencies for t in tgt_list: for loop in loops: for attr in ['indirect_objects', 'indirect_libs']: objs = getattr(t, attr, set()) if loop in objs: diff = loops[loop].difference(objs) if t.sname in diff: diff.remove(t.sname) if diff: debug('deps: Expanded 
target %s of type %s from loop %s by %s', t.sname, t.samba_type, loop, diff) objs = objs.union(diff) setattr(t, attr, objs) for loop in inc_loops: objs = getattr(t, 'includes_objects', set()) if loop in objs: diff = inc_loops[loop].difference(objs) if t.sname in diff: diff.remove(t.sname) if diff: debug('deps: Expanded target %s includes of type %s from loop %s by %s', t.sname, t.samba_type, loop, diff) objs = objs.union(diff) setattr(t, 'includes_objects', objs) def reduce_objects(bld, tgt_list): '''reduce objects by looking for indirect object dependencies''' rely_on = {} for t in tgt_list: t.extended_objects = None changed = False for type in ['BINARY', 'PYTHON', 'LIBRARY']: for t in tgt_list: if t.samba_type != type: continue # if we will indirectly link to a target then we don't need it new = t.final_objects.copy() for l in t.final_libs: t2 = bld.name_to_obj(l, bld.env) t2_obj = extended_objects(bld, t2, set()) dup = new.intersection(t2_obj) if t.sname in rely_on: dup = dup.difference(rely_on[t.sname]) if dup: debug('deps: removing dups from %s of type %s: %s also in %s %s', t.sname, t.samba_type, dup, t2.samba_type, l) new = new.difference(dup) changed = True if not l in rely_on: rely_on[l] = set() rely_on[l] = rely_on[l].union(dup) t.final_objects = new if not changed: return False # add back in any objects that were relied upon by the reduction rules for r in rely_on: t = bld.name_to_obj(r, bld.env) t.final_objects = t.final_objects.union(rely_on[r]) return True def show_library_loop(bld, lib1, lib2, path, seen): '''show the detailed path of a library loop between lib1 and lib2''' t = bld.name_to_obj(lib1, bld.env) if not lib2 in getattr(t, 'final_libs', set()): return for d in t.samba_deps_extended: if d in seen: continue seen.add(d) path2 = path + '=>' + d if d == lib2: Logs.warn('library loop path: ' + path2) return show_library_loop(bld, d, lib2, path2, seen) seen.remove(d) def calculate_final_deps(bld, tgt_list, loops): '''calculate the final library 
and object dependencies''' for t in tgt_list: # start with the maximum possible list t.final_libs = t.direct_libs.union(indirect_libs(bld, t, set(), loops)) t.final_objects = t.direct_objects.union(indirect_objects(bld, t, set(), loops)) for t in tgt_list: # don't depend on ourselves if t.sname in t.final_libs: t.final_libs.remove(t.sname) if t.sname in t.final_objects: t.final_objects.remove(t.sname) # handle any non-shared binaries for t in tgt_list: if t.samba_type == 'BINARY' and bld.NONSHARED_BINARY(t.sname): subsystem_list = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') targets = LOCAL_CACHE(bld, 'TARGET_TYPE') # replace lib deps with objlist deps for l in t.final_libs: objname = l + '.objlist' t2 = bld.name_to_obj(objname, bld.env) if t2 is None: Logs.error('ERROR: subsystem %s not found' % objname) sys.exit(1) t.final_objects.add(objname) t.final_objects = t.final_objects.union(extended_objects(bld, t2, set())) if l in subsystem_list: # its a subsystem - we also need the contents of any modules for d in subsystem_list[l]: module_name = d['TARGET'] if targets[module_name] == 'LIBRARY': objname = module_name + '.objlist' elif targets[module_name] == 'SUBSYSTEM': objname = module_name else: continue t2 = bld.name_to_obj(objname, bld.env) if t2 is None: Logs.error('ERROR: subsystem %s not found' % objname) sys.exit(1) t.final_objects.add(objname) t.final_objects = t.final_objects.union(extended_objects(bld, t2, set())) t.final_libs = set() # find any library loops for t in tgt_list: if t.samba_type in ['LIBRARY', 'PYTHON']: for l in t.final_libs.copy(): t2 = bld.name_to_obj(l, bld.env) if t.sname in t2.final_libs: if getattr(bld.env, "ALLOW_CIRCULAR_LIB_DEPENDENCIES", False): # we could break this in either direction. 
If one of the libraries # has a version number, and will this be distributed publicly, then # we should make it the lower level library in the DAG Logs.warn('deps: removing library loop %s from %s' % (t.sname, t2.sname)) dependency_loop(loops, t, t2.sname) t2.final_libs.remove(t.sname) else: Logs.error('ERROR: circular library dependency between %s and %s' % (t.sname, t2.sname)) show_library_loop(bld, t.sname, t2.sname, t.sname, set()) show_library_loop(bld, t2.sname, t.sname, t2.sname, set()) sys.exit(1) for loop in loops: debug('deps: Found dependency loops for target %s : %s', loop, loops[loop]) # we now need to make corrections for any library loops we broke up # any target that depended on the target of the loop and doesn't # depend on the source of the loop needs to get the loop source added for type in ['BINARY','PYTHON','LIBRARY','BINARY']: for t in tgt_list: if t.samba_type != type: continue for loop in loops: if loop in t.final_libs: diff = loops[loop].difference(t.final_libs) if t.sname in diff: diff.remove(t.sname) if t.sname in diff: diff.remove(t.sname) # make sure we don't recreate the loop again! 
for d in diff.copy(): t2 = bld.name_to_obj(d, bld.env) if t2.samba_type == 'LIBRARY': if t.sname in t2.final_libs: debug('deps: removing expansion %s from %s', d, t.sname) diff.remove(d) if diff: debug('deps: Expanded target %s by loop %s libraries (loop %s) %s', t.sname, loop, loops[loop], diff) t.final_libs = t.final_libs.union(diff) # remove objects that are also available in linked libs count = 0 while reduce_objects(bld, tgt_list): count += 1 if count > 100: Logs.warn("WARNING: Unable to remove all inter-target object duplicates") break debug('deps: Object reduction took %u iterations', count) # add in any syslib dependencies for t in tgt_list: if not t.samba_type in ['BINARY','PYTHON','LIBRARY','SUBSYSTEM']: continue syslibs = set() for d in t.final_objects: t2 = bld.name_to_obj(d, bld.env) syslibs = syslibs.union(t2.direct_syslibs) # this adds the indirect syslibs as well, which may not be needed # depending on the linker flags for d in t.final_libs: t2 = bld.name_to_obj(d, bld.env) syslibs = syslibs.union(t2.direct_syslibs) t.final_syslibs = syslibs # find any unresolved library loops lib_loop_error = False for t in tgt_list: if t.samba_type in ['LIBRARY', 'PYTHON']: for l in t.final_libs.copy(): t2 = bld.name_to_obj(l, bld.env) if t.sname in t2.final_libs: Logs.error('ERROR: Unresolved library loop %s from %s' % (t.sname, t2.sname)) lib_loop_error = True if lib_loop_error: sys.exit(1) debug('deps: removed duplicate dependencies') def show_dependencies(bld, target, seen): '''recursively show the dependencies of target''' if target in seen: return t = bld.name_to_obj(target, bld.env) if t is None: Logs.error("ERROR: Unable to find target '%s'" % target) sys.exit(1) Logs.info('%s(OBJECTS): %s' % (target, t.direct_objects)) Logs.info('%s(LIBS): %s' % (target, t.direct_libs)) Logs.info('%s(SYSLIBS): %s' % (target, t.direct_syslibs)) seen.add(target) for t2 in t.direct_objects: show_dependencies(bld, t2, seen) def show_object_duplicates(bld, tgt_list): '''show a 
list of object files that are included in more than one library or binary''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') used_by = {} Logs.info("showing duplicate objects") for t in tgt_list: if not targets[t.sname] in [ 'LIBRARY', 'PYTHON' ]: continue for n in getattr(t, 'final_objects', set()): t2 = bld.name_to_obj(n, bld.env) if not n in used_by: used_by[n] = set() used_by[n].add(t.sname) for n in used_by: if len(used_by[n]) > 1: Logs.info("target '%s' is used by %s" % (n, used_by[n])) Logs.info("showing indirect dependency counts (sorted by count)") def indirect_count(t1, t2): return len(t2.indirect_objects) - len(t1.indirect_objects) sorted_list = sorted(tgt_list, cmp=indirect_count) for t in sorted_list: if len(t.indirect_objects) > 1: Logs.info("%s depends on %u indirect objects" % (t.sname, len(t.indirect_objects))) ###################################################################### # this provides a way to save our dependency calculations between runs savedeps_version = 3 savedeps_inputs = ['samba_deps', 'samba_includes', 'local_include', 'local_include_first', 'samba_cflags', 'source', 'grouping_library', 'samba_ldflags', 'allow_undefined_symbols', 'use_global_deps', 'global_include' ] savedeps_outputs = ['uselib', 'uselib_local', 'add_objects', 'includes', 'ccflags', 'ldflags', 'samba_deps_extended'] savedeps_outenv = ['INC_PATHS'] savedeps_envvars = ['NONSHARED_BINARIES', 'GLOBAL_DEPENDENCIES', 'EXTRA_CFLAGS', 'EXTRA_LDFLAGS', 'EXTRA_INCLUDES' ] savedeps_caches = ['GLOBAL_DEPENDENCIES', 'TARGET_TYPE', 'INIT_FUNCTIONS', 'SYSLIB_DEPS'] savedeps_files = ['buildtools/wafsamba/samba_deps.py'] def save_samba_deps(bld, tgt_list): '''save the dependency calculations between builds, to make further builds faster''' denv = Environment.Environment() denv.version = savedeps_version denv.savedeps_inputs = savedeps_inputs denv.savedeps_outputs = savedeps_outputs denv.input = {} denv.output = {} denv.outenv = {} denv.caches = {} denv.envvar = {} denv.files = {} for f 
in savedeps_files: denv.files[f] = os.stat(os.path.join(bld.srcnode.abspath(), f)).st_mtime for c in savedeps_caches: denv.caches[c] = LOCAL_CACHE(bld, c) for e in savedeps_envvars: denv.envvar[e] = bld.env[e] for t in tgt_list: # save all the input attributes for each target tdeps = {} for attr in savedeps_inputs: v = getattr(t, attr, None) if v is not None: tdeps[attr] = v if tdeps != {}: denv.input[t.sname] = tdeps # save all the output attributes for each target tdeps = {} for attr in savedeps_outputs: v = getattr(t, attr, None) if v is not None: tdeps[attr] = v if tdeps != {}: denv.output[t.sname] = tdeps tdeps = {} for attr in savedeps_outenv: if attr in t.env: tdeps[attr] = t.env[attr] if tdeps != {}: denv.outenv[t.sname] = tdeps depsfile = os.path.join(bld.bdir, "sambadeps") denv.store(depsfile) def load_samba_deps(bld, tgt_list): '''load a previous set of build dependencies if possible''' depsfile = os.path.join(bld.bdir, "sambadeps") denv = Environment.Environment() try: debug('deps: checking saved dependencies') denv.load(depsfile) if (denv.version != savedeps_version or denv.savedeps_inputs != savedeps_inputs or denv.savedeps_outputs != savedeps_outputs): return False except: return False # check if critical files have changed for f in savedeps_files: if f not in denv.files: return False if denv.files[f] != os.stat(os.path.join(bld.srcnode.abspath(), f)).st_mtime: return False # check if caches are the same for c in savedeps_caches: if c not in denv.caches or denv.caches[c] != LOCAL_CACHE(bld, c): return False # check if caches are the same for e in savedeps_envvars: if e not in denv.envvar or denv.envvar[e] != bld.env[e]: return False # check inputs are the same for t in tgt_list: tdeps = {} for attr in savedeps_inputs: v = getattr(t, attr, None) if v is not None: tdeps[attr] = v if t.sname in denv.input: olddeps = denv.input[t.sname] else: olddeps = {} if tdeps != olddeps: #print '%s: \ntdeps=%s \nodeps=%s' % (t.sname, tdeps, olddeps) return False # 
put outputs in place for t in tgt_list: if not t.sname in denv.output: continue tdeps = denv.output[t.sname] for a in tdeps: setattr(t, a, tdeps[a]) # put output env vars in place for t in tgt_list: if not t.sname in denv.outenv: continue tdeps = denv.outenv[t.sname] for a in tdeps: t.env[a] = tdeps[a] debug('deps: loaded saved dependencies') return True def check_project_rules(bld): '''check the project rules - ensuring the targets are sane''' loops = {} inc_loops = {} tgt_list = get_tgt_list(bld) add_samba_attributes(bld, tgt_list) force_project_rules = (Options.options.SHOWDEPS or Options.options.SHOW_DUPLICATES) if not force_project_rules and load_samba_deps(bld, tgt_list): return global tstart tstart = time.clock() bld.new_rules = True Logs.info("Checking project rules ...") debug('deps: project rules checking started') expand_subsystem_deps(bld) debug("deps: expand_subsystem_deps: %f" % (time.clock() - tstart)) replace_grouping_libraries(bld, tgt_list) debug("deps: replace_grouping_libraries: %f" % (time.clock() - tstart)) build_direct_deps(bld, tgt_list) debug("deps: build_direct_deps: %f" % (time.clock() - tstart)) break_dependency_loops(bld, tgt_list) debug("deps: break_dependency_loops: %f" % (time.clock() - tstart)) if Options.options.SHOWDEPS: show_dependencies(bld, Options.options.SHOWDEPS, set()) calculate_final_deps(bld, tgt_list, loops) debug("deps: calculate_final_deps: %f" % (time.clock() - tstart)) if Options.options.SHOW_DUPLICATES: show_object_duplicates(bld, tgt_list) # run the various attribute generators for f in [ build_dependencies, build_includes, add_init_functions ]: debug('deps: project rules checking %s', f) for t in tgt_list: f(t) debug("deps: %s: %f" % (f, time.clock() - tstart)) debug('deps: project rules stage1 completed') #check_orphaned_targets(bld, tgt_list) if not check_duplicate_sources(bld, tgt_list): Logs.error("Duplicate sources present - aborting") sys.exit(1) debug("deps: check_duplicate_sources: %f" % (time.clock() - 
tstart)) if not check_group_ordering(bld, tgt_list): Logs.error("Bad group ordering - aborting") sys.exit(1) debug("deps: check_group_ordering: %f" % (time.clock() - tstart)) show_final_deps(bld, tgt_list) debug("deps: show_final_deps: %f" % (time.clock() - tstart)) debug('deps: project rules checking completed - %u targets checked', len(tgt_list)) if not bld.is_install: save_samba_deps(bld, tgt_list) debug("deps: save_samba_deps: %f" % (time.clock() - tstart)) Logs.info("Project rules pass") def CHECK_PROJECT_RULES(bld): '''enable checking of project targets for sanity''' if bld.env.added_project_rules: return bld.env.added_project_rules = True bld.add_pre_fun(check_project_rules) Build.BuildContext.CHECK_PROJECT_RULES = CHECK_PROJECT_RULES ntdb-1.0/buildtools/wafsamba/samba_dist.py000066400000000000000000000173361224151530700207340ustar00rootroot00000000000000# customised version of 'waf dist' for Samba tools # uses git ls-files to get file lists import Utils, os, sys, tarfile, stat, Scripting, Logs, Options from samba_utils import * dist_dirs = None dist_files = None dist_blacklist = "" def add_symlink(tar, fname, abspath, basedir): '''handle symlinks to directories that may move during packaging''' if not os.path.islink(abspath): return False tinfo = tar.gettarinfo(name=abspath, arcname=fname) tgt = os.readlink(abspath) if dist_dirs: # we need to find the target relative to the main directory # this is here to cope with symlinks into the buildtools # directory from within the standalone libraries in Samba. 
For example, # a symlink to ../../builtools/scripts/autogen-waf.sh needs # to be rewritten as a symlink to buildtools/scripts/autogen-waf.sh # when the tarball for talloc is built # the filename without the appname-version rel_fname = '/'.join(fname.split('/')[1:]) # join this with the symlink target tgt_full = os.path.join(os.path.dirname(rel_fname), tgt) # join with the base directory tgt_base = os.path.normpath(os.path.join(basedir, tgt_full)) # see if this is inside one of our dist_dirs for dir in dist_dirs.split(): if dir.find(':') != -1: destdir=dir.split(':')[1] dir=dir.split(':')[0] else: destdir = '.' if dir == basedir: # internal links don't get rewritten continue if dir == tgt_base[0:len(dir)] and tgt_base[len(dir)] == '/': new_tgt = destdir + tgt_base[len(dir):] tinfo.linkname = new_tgt break tinfo.uid = 0 tinfo.gid = 0 tinfo.uname = 'root' tinfo.gname = 'root' tar.addfile(tinfo) return True def add_tarfile(tar, fname, abspath, basedir): '''add a file to the tarball''' if add_symlink(tar, fname, abspath, basedir): return try: tinfo = tar.gettarinfo(name=abspath, arcname=fname) except OSError: Logs.error('Unable to find file %s - missing from git checkout?' % abspath) sys.exit(1) tinfo.uid = 0 tinfo.gid = 0 tinfo.uname = 'root' tinfo.gname = 'root' fh = open(abspath) tar.addfile(tinfo, fileobj=fh) fh.close() def vcs_dir_contents(path): """Return the versioned files under a path. 
:return: List of paths relative to path """ repo = path while repo != "/": if os.path.isdir(os.path.join(repo, ".git")): ls_files_cmd = [ 'git', 'ls-files', '--full-name', os_path_relpath(path, repo) ] cwd = None env = dict(os.environ) env["GIT_DIR"] = os.path.join(repo, ".git") break elif os.path.isdir(os.path.join(repo, ".bzr")): ls_files_cmd = [ 'bzr', 'ls', '--recursive', '--versioned', os_path_relpath(path, repo)] cwd = repo env = None break repo = os.path.dirname(repo) if repo == "/": raise Exception("unsupported or no vcs for %s" % path) return Utils.cmd_output(ls_files_cmd, cwd=cwd, env=env).split() def dist(appname='', version=''): def add_files_to_tarball(tar, srcdir, srcsubdir, dstdir, dstsubdir, blacklist, files): if blacklist is None: blacklist = [] for f in files: abspath = os.path.join(srcdir, f) if srcsubdir != '.': f = f[len(srcsubdir)+1:] # Remove files in the blacklist if f in blacklist: continue blacklisted = False # Remove directories in the blacklist for d in blacklist: if f.startswith(d): blacklisted = True if blacklisted: continue if os.path.isdir(abspath): continue if dstsubdir != '.': f = dstsubdir + '/' + f fname = dstdir + '/' + f add_tarfile(tar, fname, abspath, srcsubdir) def list_directory_files(abspath): out_files = [] for root, dirs, files in os.walk(abspath): for f in files: out_files.append(os.path.join(root, f)) return out_files if not isinstance(appname, str) or not appname: # this copes with a mismatch in the calling arguments for dist() appname = Utils.g_module.APPNAME version = Utils.g_module.VERSION if not version: version = Utils.g_module.VERSION srcdir = os.path.normpath(os.path.join(os.path.dirname(Utils.g_module.root_path), Utils.g_module.srcdir)) if not dist_dirs: Logs.error('You must use samba_dist.DIST_DIRS() to set which directories to package') sys.exit(1) dist_base = '%s-%s' % (appname, version) if Options.options.SIGN_RELEASE: dist_name = '%s.tar' % (dist_base) tar = tarfile.open(dist_name, 'w') else: dist_name = 
'%s.tar.gz' % (dist_base) tar = tarfile.open(dist_name, 'w:gz') blacklist = dist_blacklist.split() for dir in dist_dirs.split(): if dir.find(':') != -1: destdir=dir.split(':')[1] dir=dir.split(':')[0] else: destdir = '.' absdir = os.path.join(srcdir, dir) try: files = vcs_dir_contents(absdir) except Exception, e: Logs.error('unable to get contents of %s: %s' % (absdir, e)) sys.exit(1) add_files_to_tarball(tar, srcdir, dir, dist_base, destdir, blacklist, files) if dist_files: for file in dist_files.split(): if file.find(':') != -1: destfile = file.split(':')[1] file = file.split(':')[0] else: destfile = file absfile = os.path.join(srcdir, file) if os.path.isdir(absfile): destdir = destfile dir = file files = list_directory_files(dir) add_files_to_tarball(tar, srcdir, dir, dist_base, destdir, blacklist, files) else: fname = dist_base + '/' + destfile add_tarfile(tar, fname, absfile, destfile) tar.close() if Options.options.SIGN_RELEASE: import gzip try: os.unlink(dist_name + '.asc') except OSError: pass cmd = "gpg --detach-sign --armor " + dist_name os.system(cmd) uncompressed_tar = open(dist_name, 'rb') compressed_tar = gzip.open(dist_name + '.gz', 'wb') while 1: buffer = uncompressed_tar.read(1048576) if buffer: compressed_tar.write(buffer) else: break uncompressed_tar.close() compressed_tar.close() os.unlink(dist_name) Logs.info('Created %s.gz %s.asc' % (dist_name, dist_name)) dist_name = dist_name + '.gz' else: Logs.info('Created %s' % dist_name) return dist_name @conf def DIST_DIRS(dirs): '''set the directories to package, relative to top srcdir''' global dist_dirs if not dist_dirs: dist_dirs = dirs @conf def DIST_FILES(files, extend=False): '''set additional files for packaging, relative to top srcdir''' global dist_files if not dist_files: dist_files = files elif extend: dist_files = dist_files + " " + files @conf def DIST_BLACKLIST(blacklist): '''set the files to exclude from packaging, relative to top srcdir''' global dist_blacklist if not dist_blacklist: 
dist_blacklist = blacklist Scripting.dist = dist ntdb-1.0/buildtools/wafsamba/samba_headers.py000066400000000000000000000146421224151530700214010ustar00rootroot00000000000000# specialist handling of header files for Samba import Build, re, Task, TaskGen, shutil, sys, Logs from samba_utils import * def header_install_path(header, header_path): '''find the installation path for a header, given a header_path option''' if not header_path: return '' if not isinstance(header_path, list): return header_path for (p1, dir) in header_path: for p2 in TO_LIST(p1): if fnmatch.fnmatch(header, p2): return dir # default to current path return '' re_header = re.compile('^\s*#\s*include[ \t]*"([^"]+)"', re.I | re.M) # a dictionary mapping source header paths to public header paths header_map = {} def find_suggested_header(hpath): '''find a suggested header path to use''' base = os.path.basename(hpath) ret = [] for h in header_map: if os.path.basename(h) == base: ret.append('<%s>' % header_map[h]) ret.append('"%s"' % h) return ret def create_public_header(task): '''create a public header from a private one, output within the build tree''' src = task.inputs[0].abspath(task.env) tgt = task.outputs[0].bldpath(task.env) if os.path.exists(tgt): os.unlink(tgt) relsrc = os_path_relpath(src, task.env.TOPDIR) infile = open(src, mode='r') outfile = open(tgt, mode='w') linenumber = 0 search_paths = [ '', task.env.RELPATH ] for i in task.env.EXTRA_INCLUDES: if i.startswith('#'): search_paths.append(i[1:]) for line in infile: linenumber += 1 # allow some straight substitutions if task.env.public_headers_replace and line.strip() in task.env.public_headers_replace: outfile.write(task.env.public_headers_replace[line.strip()] + '\n') continue # see if its an include line m = re_header.match(line) if m is None: outfile.write(line) continue # its an include, get the header path hpath = m.group(1) if hpath.startswith("bin/default/"): hpath = hpath[12:] # some are always allowed if 
task.env.public_headers_skip and hpath in task.env.public_headers_skip: outfile.write(line) continue # work out the header this refers to found = False for s in search_paths: p = os.path.normpath(os.path.join(s, hpath)) if p in header_map: outfile.write("#include <%s>\n" % header_map[p]) found = True break if found: continue if task.env.public_headers_allow_broken: Logs.warn("Broken public header include '%s' in '%s'" % (hpath, relsrc)) outfile.write(line) continue # try to be nice to the developer by suggesting an alternative suggested = find_suggested_header(hpath) outfile.close() os.unlink(tgt) sys.stderr.write("%s:%u:Error: unable to resolve public header %s (maybe try one of %s)\n" % ( os.path.relpath(src, os.getcwd()), linenumber, hpath, suggested)) raise Utils.WafError("Unable to resolve header path '%s' in public header '%s' in directory %s" % ( hpath, relsrc, task.env.RELPATH)) infile.close() outfile.close() def public_headers_simple(bld, public_headers, header_path=None, public_headers_install=True): '''install some headers - simple version, no munging needed ''' if not public_headers_install: return for h in TO_LIST(public_headers): inst_path = header_install_path(h, header_path) if h.find(':') != -1: s = h.split(":") h_name = s[0] inst_name = s[1] else: h_name = h inst_name = os.path.basename(h) bld.INSTALL_FILES('${INCLUDEDIR}', h_name, destname=inst_name) def PUBLIC_HEADERS(bld, public_headers, header_path=None, public_headers_install=True): '''install some headers header_path may either be a string that is added to the INCLUDEDIR, or it can be a dictionary of wildcard patterns which map to destination directories relative to INCLUDEDIR ''' bld.SET_BUILD_GROUP('final') if not bld.env.build_public_headers: # in this case no header munging neeeded. 
Used for tdb, talloc etc public_headers_simple(bld, public_headers, header_path=header_path, public_headers_install=public_headers_install) return # create the public header in the given path # in the build tree for h in TO_LIST(public_headers): inst_path = header_install_path(h, header_path) if h.find(':') != -1: s = h.split(":") h_name = s[0] inst_name = s[1] else: h_name = h inst_name = os.path.basename(h) relpath1 = os_path_relpath(bld.srcnode.abspath(), bld.curdir) relpath2 = os_path_relpath(bld.curdir, bld.srcnode.abspath()) targetdir = os.path.normpath(os.path.join(relpath1, bld.env.build_public_headers, inst_path)) if not os.path.exists(os.path.join(bld.curdir, targetdir)): raise Utils.WafError("missing source directory %s for public header %s" % (targetdir, inst_name)) target = os.path.join(targetdir, inst_name) # the source path of the header, relative to the top of the source tree src_path = os.path.normpath(os.path.join(relpath2, h_name)) # the install path of the header, relative to the public include directory target_path = os.path.normpath(os.path.join(inst_path, inst_name)) header_map[src_path] = target_path t = bld.SAMBA_GENERATOR('HEADER_%s/%s/%s' % (relpath2, inst_path, inst_name), group='headers', rule=create_public_header, source=h_name, target=target) t.env.RELPATH = relpath2 t.env.TOPDIR = bld.srcnode.abspath() if not bld.env.public_headers_list: bld.env.public_headers_list = [] bld.env.public_headers_list.append(os.path.join(inst_path, inst_name)) if public_headers_install: bld.INSTALL_FILES('${INCLUDEDIR}', target, destname=os.path.join(inst_path, inst_name), flat=True) Build.BuildContext.PUBLIC_HEADERS = PUBLIC_HEADERS ntdb-1.0/buildtools/wafsamba/samba_install.py000066400000000000000000000174671224151530700214440ustar00rootroot00000000000000########################### # this handles the magic we need to do for installing # with all the configure options that affect rpath and shared # library use import Options from TaskGen import feature, 
before, after from samba_utils import * @feature('install_bin') @after('apply_core') @before('apply_link', 'apply_obj_vars') def install_binary(self): '''install a binary, taking account of the different rpath varients''' bld = self.bld # get the ldflags we will use for install and build install_ldflags = install_rpath(self) build_ldflags = build_rpath(bld) if not Options.is_install: # just need to set rpath if we are not installing self.env.RPATH = build_ldflags return # work out the install path, expanding variables install_path = getattr(self, 'samba_inst_path', None) or '${BINDIR}' install_path = bld.EXPAND_VARIABLES(install_path) orig_target = os.path.basename(self.target) if install_ldflags != build_ldflags: # we will be creating a new target name, and using that for the # install link. That stops us from overwriting the existing build # target, which has different ldflags self.target += '.inst' # setup the right rpath link flags for the install self.env.RPATH = install_ldflags if not self.samba_install: # this binary is marked not to be installed return # tell waf to install the right binary bld.install_as(os.path.join(install_path, orig_target), os.path.join(self.path.abspath(bld.env), self.target), chmod=MODE_755) @feature('install_lib') @after('apply_core') @before('apply_link', 'apply_obj_vars') def install_library(self): '''install a library, taking account of the different rpath varients''' if getattr(self, 'done_install_library', False): return bld = self.bld install_ldflags = install_rpath(self) build_ldflags = build_rpath(bld) if not Options.is_install or not getattr(self, 'samba_install', True): # just need to set the build rpath if we are not installing self.env.RPATH = build_ldflags return # setup the install path, expanding variables install_path = getattr(self, 'samba_inst_path', None) if install_path is None: if getattr(self, 'private_library', False): install_path = '${PRIVATELIBDIR}' else: install_path = '${LIBDIR}' install_path = 
bld.EXPAND_VARIABLES(install_path) target_name = self.target if install_ldflags != build_ldflags: # we will be creating a new target name, and using that for the # install link. That stops us from overwriting the existing build # target, which has different ldflags self.done_install_library = True t = self.clone('default') t.posted = False t.target += '.inst' self.env.RPATH = build_ldflags else: t = self t.env.RPATH = install_ldflags dev_link = None # in the following the names are: # - inst_name is the name with .inst. in it, in the build # directory # - install_name is the name in the install directory # - install_link is a symlink in the install directory, to install_name if getattr(self, 'samba_realname', None): install_name = self.samba_realname install_link = None if getattr(self, 'soname', ''): install_link = self.soname if getattr(self, 'samba_type', None) == 'PYTHON': inst_name = bld.make_libname(t.target, nolibprefix=True, python=True) else: inst_name = bld.make_libname(t.target) elif self.vnum: vnum_base = self.vnum.split('.')[0] install_name = bld.make_libname(target_name, version=self.vnum) install_link = bld.make_libname(target_name, version=vnum_base) inst_name = bld.make_libname(t.target) if not self.private_library: # only generate the dev link for non-bundled libs dev_link = bld.make_libname(target_name) elif getattr(self, 'soname', ''): install_name = bld.make_libname(target_name) install_link = self.soname inst_name = bld.make_libname(t.target) else: install_name = bld.make_libname(target_name) install_link = None inst_name = bld.make_libname(t.target) if t.env.SONAME_ST: # ensure we get the right names in the library if install_link: t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_link) else: t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_name) t.env.SONAME_ST = '' # tell waf to install the library bld.install_as(os.path.join(install_path, install_name), os.path.join(self.path.abspath(bld.env), inst_name), chmod=MODE_755) 
if install_link and install_link != install_name: # and the symlink if needed bld.symlink_as(os.path.join(install_path, install_link), os.path.basename(install_name)) if dev_link: bld.symlink_as(os.path.join(install_path, dev_link), os.path.basename(install_name)) @feature('cshlib') @after('apply_implib') @before('apply_vnum') def apply_soname(self): '''install a library, taking account of the different rpath varients''' if self.env.SONAME_ST and getattr(self, 'soname', ''): self.env.append_value('LINKFLAGS', self.env.SONAME_ST % self.soname) self.env.SONAME_ST = '' @feature('cshlib') @after('apply_implib') @before('apply_vnum') def apply_vscript(self): '''add version-script arguments to library build''' if self.env.HAVE_LD_VERSION_SCRIPT and getattr(self, 'version_script', ''): self.env.append_value('LINKFLAGS', "-Wl,--version-script=%s" % self.version_script) self.version_script = None ############################## # handle the creation of links for libraries and binaries in the build tree @feature('symlink_lib') @after('apply_link') def symlink_lib(self): '''symlink a shared lib''' if self.target.endswith('.inst'): return blddir = os.path.dirname(self.bld.srcnode.abspath(self.bld.env)) libpath = self.link_task.outputs[0].abspath(self.env) # calculat the link target and put it in the environment soext="" vnum = getattr(self, 'vnum', None) if vnum is not None: soext = '.' 
+ vnum.split('.')[0] link_target = getattr(self, 'link_name', '') if link_target == '': basename = os.path.basename(self.bld.make_libname(self.target, version=soext)) if getattr(self, "private_library", False): link_target = '%s/private/%s' % (LIB_PATH, basename) else: link_target = '%s/%s' % (LIB_PATH, basename) link_target = os.path.join(blddir, link_target) if os.path.lexists(link_target): if os.path.islink(link_target) and os.readlink(link_target) == libpath: return os.unlink(link_target) link_container = os.path.dirname(link_target) if not os.path.isdir(link_container): os.makedirs(link_container) os.symlink(libpath, link_target) @feature('symlink_bin') @after('apply_link') def symlink_bin(self): '''symlink a binary into the build directory''' if self.target.endswith('.inst'): return blddir = os.path.dirname(self.bld.srcnode.abspath(self.bld.env)) if not self.link_task.outputs or not self.link_task.outputs[0]: raise Utils.WafError('no outputs found for %s in symlink_bin' % self.name) binpath = self.link_task.outputs[0].abspath(self.env) bldpath = os.path.join(self.bld.env.BUILD_DIRECTORY, self.link_task.outputs[0].name) if os.path.lexists(bldpath): if os.path.islink(bldpath) and os.readlink(bldpath) == binpath: return os.unlink(bldpath) os.symlink(binpath, bldpath) ntdb-1.0/buildtools/wafsamba/samba_optimisation.py000066400000000000000000000111111224151530700224710ustar00rootroot00000000000000# This file contains waf optimisations for Samba # most of these optimisations are possible because of the restricted build environment # that Samba has. 
For example, Samba doesn't attempt to cope with Win32 paths during the # build, and Samba doesn't need build varients # overall this makes some build tasks quite a bit faster from TaskGen import feature, after import preproc, Task @feature('cc', 'cxx') @after('apply_type_vars', 'apply_lib_vars', 'apply_core') def apply_incpaths(self): lst = [] try: kak = self.bld.kak except AttributeError: kak = self.bld.kak = {} # TODO move the uselib processing out of here for lib in self.to_list(self.uselib): for path in self.env['CPPPATH_' + lib]: if not path in lst: lst.append(path) if preproc.go_absolute: for path in preproc.standard_includes: if not path in lst: lst.append(path) for path in self.to_list(self.includes): if not path in lst: if preproc.go_absolute or path[0] != '/': # os.path.isabs(path): lst.append(path) else: self.env.prepend_value('CPPPATH', path) for path in lst: node = None if path[0] == '/': # os.path.isabs(path): if preproc.go_absolute: node = self.bld.root.find_dir(path) elif path[0] == '#': node = self.bld.srcnode if len(path) > 1: try: node = kak[path] except KeyError: kak[path] = node = node.find_dir(path[1:]) else: try: node = kak[(self.path.id, path)] except KeyError: kak[(self.path.id, path)] = node = self.path.find_dir(path) if node: self.env.append_value('INC_PATHS', node) @feature('cc') @after('apply_incpaths') def apply_obj_vars_cc(self): """after apply_incpaths for INC_PATHS""" env = self.env app = env.append_unique cpppath_st = env['CPPPATH_ST'] lss = env['_CCINCFLAGS'] try: cac = self.bld.cac except AttributeError: cac = self.bld.cac = {} # local flags come first # set the user-defined includes paths for i in env['INC_PATHS']: try: lss.extend(cac[i.id]) except KeyError: cac[i.id] = [cpppath_st % i.bldpath(env), cpppath_st % i.srcpath(env)] lss.extend(cac[i.id]) env['_CCINCFLAGS'] = lss # set the library include paths for i in env['CPPPATH']: app('_CCINCFLAGS', cpppath_st % i) import Node, Environment def vari(self): return "default" 
Environment.Environment.variant = vari def variant(self, env): if not env: return 0 elif self.id & 3 == Node.FILE: return 0 else: return "default" Node.Node.variant = variant import TaskGen, Task def create_task(self, name, src=None, tgt=None): task = Task.TaskBase.classes[name](self.env, generator=self) if src: task.set_inputs(src) if tgt: task.set_outputs(tgt) return task TaskGen.task_gen.create_task = create_task def hash_constraints(self): a = self.attr sum = hash((str(a('before', '')), str(a('after', '')), str(a('ext_in', '')), str(a('ext_out', '')), self.__class__.maxjobs)) return sum Task.TaskBase.hash_constraints = hash_constraints # import cc # from TaskGen import extension # import Utils # @extension(cc.EXT_CC) # def c_hook(self, node): # task = self.create_task('cc', node, node.change_ext('.o')) # try: # self.compiled_tasks.append(task) # except AttributeError: # raise Utils.WafError('Have you forgotten to set the feature "cc" on %s?' % str(self)) # bld = self.bld # try: # dc = bld.dc # except AttributeError: # dc = bld.dc = {} # if task.outputs[0].id in dc: # raise Utils.WafError('Samba, you are doing it wrong %r %s %s' % (task.outputs, task.generator, dc[task.outputs[0].id].generator)) # else: # dc[task.outputs[0].id] = task # return task def suncc_wrap(cls): '''work around a problem with cc on solaris not handling module aliases which have empty libs''' if getattr(cls, 'solaris_wrap', False): return cls.solaris_wrap = True oldrun = cls.run def run(self): if self.env.CC_NAME == "sun" and not self.inputs: self.env = self.env.copy() self.env.append_value('LINKFLAGS', '-') return oldrun(self) cls.run = run suncc_wrap(Task.TaskBase.classes['cc_link']) ntdb-1.0/buildtools/wafsamba/samba_patterns.py000066400000000000000000000236571224151530700216340ustar00rootroot00000000000000# a waf tool to add extension based build patterns for Samba import Task from TaskGen import extension from samba_utils import * from wafsamba import samba_version_file def 
write_version_header(task): '''print version.h contents''' src = task.inputs[0].srcpath(task.env) tgt = task.outputs[0].bldpath(task.env) version = samba_version_file(src, task.env.srcdir, env=task.env, is_install=task.env.is_install) string = str(version) f = open(tgt, 'w') s = f.write(string) f.close() return 0 def SAMBA_MKVERSION(bld, target): '''generate the version.h header for Samba''' # We only force waf to re-generate this file if we are installing, # because only then is information not included in the deps (the # git revision) included in the version. t = bld.SAMBA_GENERATOR('VERSION', rule=write_version_header, source= 'VERSION', target=target, always=bld.is_install) t.env.is_install = bld.is_install Build.BuildContext.SAMBA_MKVERSION = SAMBA_MKVERSION def write_build_options_header(fp): '''write preamble for build_options.c''' fp.write("/*\n") fp.write(" Unix SMB/CIFS implementation.\n") fp.write(" Build Options for Samba Suite\n") fp.write(" Copyright (C) Vance Lankhaar 2003\n") fp.write(" Copyright (C) Andrew Bartlett 2001\n") fp.write("\n") fp.write(" This program is free software; you can redistribute it and/or modify\n") fp.write(" it under the terms of the GNU General Public License as published by\n") fp.write(" the Free Software Foundation; either version 3 of the License, or\n") fp.write(" (at your option) any later version.\n") fp.write("\n") fp.write(" This program is distributed in the hope that it will be useful,\n") fp.write(" but WITHOUT ANY WARRANTY; without even the implied warranty of\n") fp.write(" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n") fp.write(" GNU General Public License for more details.\n") fp.write("\n") fp.write(" You should have received a copy of the GNU General Public License\n") fp.write(" along with this program; if not, see .\n") fp.write("*/\n") fp.write("\n") fp.write("#include \"includes.h\"\n") fp.write("#include \"build_env.h\"\n") fp.write("#include \"dynconfig/dynconfig.h\"\n") fp.write("\n") fp.write("static int output(bool screen, const char *format, ...) PRINTF_ATTRIBUTE(2,3);\n") fp.write("void build_options(bool screen);\n") fp.write("\n") fp.write("\n") fp.write("/****************************************************************************\n") fp.write("helper function for build_options\n") fp.write("****************************************************************************/\n") fp.write("static int output(bool screen, const char *format, ...)\n") fp.write("{\n") fp.write(" char *ptr = NULL;\n") fp.write(" int ret = 0;\n") fp.write(" va_list ap;\n") fp.write(" \n") fp.write(" va_start(ap, format);\n") fp.write(" ret = vasprintf(&ptr,format,ap);\n") fp.write(" va_end(ap);\n") fp.write("\n") fp.write(" if (screen) {\n") fp.write(" d_printf(\"%s\", ptr ? ptr : \"\");\n") fp.write(" } else {\n") fp.write(" DEBUG(4,(\"%s\", ptr ? 
ptr : \"\"));\n") fp.write(" }\n") fp.write(" \n") fp.write(" SAFE_FREE(ptr);\n") fp.write(" return ret;\n") fp.write("}\n") fp.write("\n") fp.write("/****************************************************************************\n") fp.write("options set at build time for the samba suite\n") fp.write("****************************************************************************/\n") fp.write("void build_options(bool screen)\n") fp.write("{\n") fp.write(" if ((DEBUGLEVEL < 4) && (!screen)) {\n") fp.write(" return;\n") fp.write(" }\n") fp.write("\n") fp.write("#ifdef _BUILD_ENV_H\n") fp.write(" /* Output information about the build environment */\n") fp.write(" output(screen,\"Build environment:\\n\");\n") fp.write(" output(screen,\" Built by: %s@%s\\n\",BUILD_ENV_USER,BUILD_ENV_HOST);\n") fp.write(" output(screen,\" Built on: %s\\n\",BUILD_ENV_DATE);\n") fp.write("\n") fp.write(" output(screen,\" Built using: %s\\n\",BUILD_ENV_COMPILER);\n") fp.write(" output(screen,\" Build host: %s\\n\",BUILD_ENV_UNAME);\n") fp.write(" output(screen,\" SRCDIR: %s\\n\",BUILD_ENV_SRCDIR);\n") fp.write(" output(screen,\" BUILDDIR: %s\\n\",BUILD_ENV_BUILDDIR);\n") fp.write("\n") fp.write("\n") fp.write("#endif\n") fp.write("\n") fp.write(" /* Output various paths to files and directories */\n") fp.write(" output(screen,\"\\nPaths:\\n\");\n") fp.write(" output(screen,\" SBINDIR: %s\\n\", get_dyn_SBINDIR());\n") fp.write(" output(screen,\" BINDIR: %s\\n\", get_dyn_BINDIR());\n") fp.write(" output(screen,\" CONFIGFILE: %s\\n\", get_dyn_CONFIGFILE());\n") fp.write(" output(screen,\" LOGFILEBASE: %s\\n\", get_dyn_LOGFILEBASE());\n") fp.write(" output(screen,\" LMHOSTSFILE: %s\\n\",get_dyn_LMHOSTSFILE());\n") fp.write(" output(screen,\" LIBDIR: %s\\n\",get_dyn_LIBDIR());\n") fp.write(" output(screen,\" MODULESDIR: %s\\n\",get_dyn_MODULESDIR());\n") fp.write(" output(screen,\" SHLIBEXT: %s\\n\",get_dyn_SHLIBEXT());\n") fp.write(" output(screen,\" LOCKDIR: %s\\n\",get_dyn_LOCKDIR());\n") 
fp.write(" output(screen,\" STATEDIR: %s\\n\",get_dyn_STATEDIR());\n") fp.write(" output(screen,\" CACHEDIR: %s\\n\",get_dyn_CACHEDIR());\n") fp.write(" output(screen,\" PIDDIR: %s\\n\", get_dyn_PIDDIR());\n") fp.write(" output(screen,\" SMB_PASSWD_FILE: %s\\n\",get_dyn_SMB_PASSWD_FILE());\n") fp.write(" output(screen,\" PRIVATE_DIR: %s\\n\",get_dyn_PRIVATE_DIR());\n") fp.write("\n") def write_build_options_footer(fp): fp.write(" /* Output the sizes of the various types */\n") fp.write(" output(screen, \"\\nType sizes:\\n\");\n") fp.write(" output(screen, \" sizeof(char): %lu\\n\",(unsigned long)sizeof(char));\n") fp.write(" output(screen, \" sizeof(int): %lu\\n\",(unsigned long)sizeof(int));\n") fp.write(" output(screen, \" sizeof(long): %lu\\n\",(unsigned long)sizeof(long));\n") fp.write("#if HAVE_LONGLONG\n") fp.write(" output(screen, \" sizeof(long long): %lu\\n\",(unsigned long)sizeof(long long));\n") fp.write("#endif\n") fp.write(" output(screen, \" sizeof(uint8): %lu\\n\",(unsigned long)sizeof(uint8));\n") fp.write(" output(screen, \" sizeof(uint16): %lu\\n\",(unsigned long)sizeof(uint16));\n") fp.write(" output(screen, \" sizeof(uint32): %lu\\n\",(unsigned long)sizeof(uint32));\n") fp.write(" output(screen, \" sizeof(short): %lu\\n\",(unsigned long)sizeof(short));\n") fp.write(" output(screen, \" sizeof(void*): %lu\\n\",(unsigned long)sizeof(void*));\n") fp.write(" output(screen, \" sizeof(size_t): %lu\\n\",(unsigned long)sizeof(size_t));\n") fp.write(" output(screen, \" sizeof(off_t): %lu\\n\",(unsigned long)sizeof(off_t));\n") fp.write(" output(screen, \" sizeof(ino_t): %lu\\n\",(unsigned long)sizeof(ino_t));\n") fp.write(" output(screen, \" sizeof(dev_t): %lu\\n\",(unsigned long)sizeof(dev_t));\n") fp.write("\n") fp.write(" output(screen, \"\\nBuiltin modules:\\n\");\n") fp.write(" output(screen, \" %s\\n\", STRING_STATIC_MODULES);\n") fp.write("}\n") def write_build_options_section(fp, keys, section): fp.write("\n\t/* Show %s */\n" % section) fp.write(" 
output(screen, \"\\n%s:\\n\");\n\n" % section) for k in sorted(keys): fp.write("#ifdef %s\n" % k) fp.write(" output(screen, \" %s\\n\");\n" % k) fp.write("#endif\n") fp.write("\n") def write_build_options(task): tbl = task.env['defines'] keys_option_with = [] keys_option_utmp = [] keys_option_have = [] keys_header_sys = [] keys_header_other = [] keys_misc = [] for key in tbl: if key.startswith("HAVE_UT_UT_") or key.find("UTMP") >= 0: keys_option_utmp.append(key) elif key.startswith("WITH_"): keys_option_with.append(key) elif key.startswith("HAVE_SYS_"): keys_header_sys.append(key) elif key.startswith("HAVE_"): if key.endswith("_H"): keys_header_other.append(key) else: keys_option_have.append(key) else: keys_misc.append(key) tgt = task.outputs[0].bldpath(task.env) f = open(tgt, 'w') write_build_options_header(f) write_build_options_section(f, keys_header_sys, "System Headers") write_build_options_section(f, keys_header_other, "Headers") write_build_options_section(f, keys_option_utmp, "UTMP Options") write_build_options_section(f, keys_option_have, "HAVE_* Defines") write_build_options_section(f, keys_option_with, "--with Options") write_build_options_section(f, keys_misc, "Build Options") write_build_options_footer(f) f.close() return 0 def SAMBA_BLDOPTIONS(bld, target): '''generate the bld_options.c for Samba''' t = bld.SAMBA_GENERATOR(target, rule=write_build_options, target=target, always=True) Build.BuildContext.SAMBA_BLDOPTIONS = SAMBA_BLDOPTIONS ntdb-1.0/buildtools/wafsamba/samba_pidl.py000066400000000000000000000125531224151530700207150ustar00rootroot00000000000000# waf build tool for building IDL files with pidl from TaskGen import before import Build, os, sys, Logs from samba_utils import * def SAMBA_PIDL(bld, pname, source, options='', output_dir='.', generate_tables=True): '''Build a IDL file using pidl. 
This will produce up to 13 output files depending on the options used''' bname = source[0:-4]; # strip off the .idl suffix bname = os.path.basename(bname) name = "%s_%s" % (pname, bname.upper()) if not SET_TARGET_TYPE(bld, name, 'PIDL'): return bld.SET_BUILD_GROUP('build_source') # the output files depend on the options used. Use this dictionary # to map between the options and the resulting file names options_map = { '--header' : '%s.h', '--ndr-parser' : 'ndr_%s.c ndr_%s.h', '--samba3-ndr-server' : 'srv_%s.c srv_%s.h', '--samba3-ndr-client' : 'cli_%s.c cli_%s.h', '--server' : 'ndr_%s_s.c', '--client' : 'ndr_%s_c.c ndr_%s_c.h', '--python' : 'py_%s.c', '--tdr-parser' : 'tdr_%s.c tdr_%s.h', '--dcom-proxy' : '%s_p.c', '--com-header' : 'com_%s.h' } table_header_idx = None out_files = [] options_list = TO_LIST(options) for o in options_list: if o in options_map: ofiles = TO_LIST(options_map[o]) for f in ofiles: out_files.append(os.path.join(output_dir, f % bname)) if f == 'ndr_%s.h': # remember this one for the tables generation table_header_idx = len(out_files) - 1 # depend on the full pidl sources source = TO_LIST(source) try: pidl_src_nodes = bld.pidl_files_cache except AttributeError: bld.pidl_files_cache = bld.srcnode.ant_glob('pidl/lib/Parse/**/*.pm', flat=False) bld.pidl_files_cache.extend(bld.srcnode.ant_glob('pidl', flat=False)) pidl_src_nodes = bld.pidl_files_cache # the cd .. is needed because pidl currently is sensitive to the directory it is run in cpp = "" cc = "" if bld.CONFIG_SET("CPP") and bld.CONFIG_GET("CPP") != "": if isinstance(bld.CONFIG_GET("CPP"), list): cpp = 'CPP="%s"' % " ".join(bld.CONFIG_GET("CPP")) else: cpp = 'CPP="%s"' % bld.CONFIG_GET("CPP") if cpp == "CPP=xlc_r": cpp = "" if bld.CONFIG_SET("CC"): if isinstance(bld.CONFIG_GET("CC"), list): cc = 'CC="%s"' % " ".join(bld.CONFIG_GET("CC")) else: cc = 'CC="%s"' % bld.CONFIG_GET("CC") t = bld(rule='cd .. 
&& %s %s ${PERL} "${PIDL}" --quiet ${OPTIONS} --outputdir ${OUTPUTDIR} -- "${SRC[0].abspath(env)}"' % (cpp, cc), ext_out = '.c', before = 'cc', on_results = True, shell = True, source = source, target = out_files, name = name, samba_type = 'PIDL') # prime the list of nodes we are dependent on with the cached pidl sources t.allnodes = pidl_src_nodes t.env.PIDL = os.path.join(bld.srcnode.abspath(), 'pidl/pidl') t.env.OPTIONS = TO_LIST(options) t.env.OUTPUTDIR = bld.bldnode.name + '/' + bld.path.find_dir(output_dir).bldpath(t.env) if generate_tables and table_header_idx is not None: pidl_headers = LOCAL_CACHE(bld, 'PIDL_HEADERS') pidl_headers[name] = [bld.path.find_or_declare(out_files[table_header_idx])] t.more_includes = '#' + bld.path.relpath_gen(bld.srcnode) Build.BuildContext.SAMBA_PIDL = SAMBA_PIDL def SAMBA_PIDL_LIST(bld, name, source, options='', output_dir='.', generate_tables=True): '''A wrapper for building a set of IDL files''' for p in TO_LIST(source): bld.SAMBA_PIDL(name, p, options=options, output_dir=output_dir, generate_tables=generate_tables) Build.BuildContext.SAMBA_PIDL_LIST = SAMBA_PIDL_LIST ################################################################# # the rule for generating the NDR tables from TaskGen import feature, before @feature('collect') @before('exec_rule') def collect(self): pidl_headers = LOCAL_CACHE(self.bld, 'PIDL_HEADERS') for (name, hd) in pidl_headers.items(): y = self.bld.name_to_obj(name, self.env) self.bld.ASSERT(y is not None, 'Failed to find PIDL header %s' % name) y.post() for node in hd: self.bld.ASSERT(node is not None, 'Got None as build node generating PIDL table for %s' % name) self.source += " " + node.relpath_gen(self.path) def SAMBA_PIDL_TABLES(bld, name, target): '''generate the pidl NDR tables file''' headers = bld.env.PIDL_HEADERS bld.SET_BUILD_GROUP('main') t = bld( features = 'collect', rule = '${PERL} ${SRC} --output ${TGT} | sed "s|default/||" > ${TGT}', ext_out = '.c', before = 'cc', on_results = True, 
shell = True, source = '../../librpc/tables.pl', target = target, name = name) t.env.LIBRPC = os.path.join(bld.srcnode.abspath(), 'librpc') Build.BuildContext.SAMBA_PIDL_TABLES = SAMBA_PIDL_TABLES ntdb-1.0/buildtools/wafsamba/samba_python.py000066400000000000000000000042771224151530700213120ustar00rootroot00000000000000# waf build tool for building IDL files with pidl import Build from samba_utils import * from samba_autoconf import * from Configure import conf @conf def SAMBA_CHECK_PYTHON(conf, mandatory=True, version=(2,4,2)): # enable tool to build python extensions conf.find_program('python', var='PYTHON', mandatory=mandatory) conf.check_tool('python') path_python = conf.find_program('python') conf.env.PYTHON_SPECIFIED = (conf.env.PYTHON != path_python) conf.check_python_version(version) @conf def SAMBA_CHECK_PYTHON_HEADERS(conf, mandatory=True): if conf.env["python_headers_checked"] == []: conf.check_python_headers(mandatory) conf.env["python_headers_checked"] = "yes" else: conf.msg("python headers", "using cache") def SAMBA_PYTHON(bld, name, source='', deps='', public_deps='', realname=None, cflags='', includes='', init_function_sentinel=None, local_include=True, vars=None, enabled=True): '''build a python extension for Samba''' # when we support static python modules we'll need to gather # the list from all the SAMBA_PYTHON() targets if init_function_sentinel is not None: cflags += '-DSTATIC_LIBPYTHON_MODULES=%s' % init_function_sentinel source = bld.EXPAND_VARIABLES(source, vars=vars) if realname is not None: link_name = 'python_modules/%s' % realname else: link_name = None bld.SAMBA_LIBRARY(name, source=source, deps=deps, public_deps=public_deps, includes=includes, cflags=cflags, local_include=local_include, vars=vars, realname=realname, link_name=link_name, pyext=True, target_type='PYTHON', install_path='${PYTHONARCHDIR}', allow_undefined_symbols=True, enabled=enabled) Build.BuildContext.SAMBA_PYTHON = SAMBA_PYTHON 
ntdb-1.0/buildtools/wafsamba/samba_utils.py000066400000000000000000000515021224151530700211220ustar00rootroot00000000000000# a waf tool to add autoconf-like macros to the configure section # and for SAMBA_ macros for building libraries, binaries etc import Build, os, sys, Options, Utils, Task, re, fnmatch, Logs from TaskGen import feature, before from Configure import conf, ConfigurationContext from Logs import debug import shlex # TODO: make this a --option LIB_PATH="shared" # sigh, python octal constants are a mess MODE_644 = int('644', 8) MODE_755 = int('755', 8) @conf def SET_TARGET_TYPE(ctx, target, value): '''set the target type of a target''' cache = LOCAL_CACHE(ctx, 'TARGET_TYPE') if target in cache and cache[target] != 'EMPTY': Logs.error("ERROR: Target '%s' in directory %s re-defined as %s - was %s" % (target, ctx.curdir, value, cache[target])) sys.exit(1) LOCAL_CACHE_SET(ctx, 'TARGET_TYPE', target, value) debug("task_gen: Target '%s' created of type '%s' in %s" % (target, value, ctx.curdir)) return True def GET_TARGET_TYPE(ctx, target): '''get target type from cache''' cache = LOCAL_CACHE(ctx, 'TARGET_TYPE') if not target in cache: return None return cache[target] ###################################################### # this is used as a decorator to make functions only # run once. 
Based on the idea from # http://stackoverflow.com/questions/815110/is-there-a-decorator-to-simply-cache-function-return-values runonce_ret = {} def runonce(function): def runonce_wrapper(*args): if args in runonce_ret: return runonce_ret[args] else: ret = function(*args) runonce_ret[args] = ret return ret return runonce_wrapper def ADD_LD_LIBRARY_PATH(path): '''add something to LD_LIBRARY_PATH''' if 'LD_LIBRARY_PATH' in os.environ: oldpath = os.environ['LD_LIBRARY_PATH'] else: oldpath = '' newpath = oldpath.split(':') if not path in newpath: newpath.append(path) os.environ['LD_LIBRARY_PATH'] = ':'.join(newpath) def needs_private_lib(bld, target): '''return True if a target links to a private library''' for lib in getattr(target, "final_libs", []): t = bld.name_to_obj(lib, bld.env) if t and getattr(t, 'private_library', False): return True return False def install_rpath(target): '''the rpath value for installation''' bld = target.bld bld.env['RPATH'] = [] ret = set() if bld.env.RPATH_ON_INSTALL: ret.add(bld.EXPAND_VARIABLES(bld.env.LIBDIR)) if bld.env.RPATH_ON_INSTALL_PRIVATE and needs_private_lib(bld, target): ret.add(bld.EXPAND_VARIABLES(bld.env.PRIVATELIBDIR)) return list(ret) def build_rpath(bld): '''the rpath value for build''' rpaths = [os.path.normpath('%s/%s' % (bld.env.BUILD_DIRECTORY, d)) for d in ("shared", "shared/private")] bld.env['RPATH'] = [] if bld.env.RPATH_ON_BUILD: return rpaths for rpath in rpaths: ADD_LD_LIBRARY_PATH(rpath) return [] @conf def LOCAL_CACHE(ctx, name): '''return a named build cache dictionary, used to store state inside other functions''' if name in ctx.env: return ctx.env[name] ctx.env[name] = {} return ctx.env[name] @conf def LOCAL_CACHE_SET(ctx, cachename, key, value): '''set a value in a local cache''' cache = LOCAL_CACHE(ctx, cachename) cache[key] = value @conf def ASSERT(ctx, expression, msg): '''a build assert call''' if not expression: raise Utils.WafError("ERROR: %s\n" % msg) Build.BuildContext.ASSERT = ASSERT def 
SUBDIR(bld, subdir, list): '''create a list of files by pre-pending each with a subdir name''' ret = '' for l in TO_LIST(list): ret = ret + os.path.normpath(os.path.join(subdir, l)) + ' ' return ret Build.BuildContext.SUBDIR = SUBDIR def dict_concat(d1, d2): '''concatenate two dictionaries d1 += d2''' for t in d2: if t not in d1: d1[t] = d2[t] def exec_command(self, cmd, **kw): '''this overrides the 'waf -v' debug output to be in a nice unix like format instead of a python list. Thanks to ita on #waf for this''' import Utils, Logs _cmd = cmd if isinstance(cmd, list): _cmd = ' '.join(cmd) debug('runner: %s' % _cmd) if self.log: self.log.write('%s\n' % cmd) kw['log'] = self.log try: if not kw.get('cwd', None): kw['cwd'] = self.cwd except AttributeError: self.cwd = kw['cwd'] = self.bldnode.abspath() return Utils.exec_command(cmd, **kw) Build.BuildContext.exec_command = exec_command def ADD_COMMAND(opt, name, function): '''add a new top level command to waf''' Utils.g_module.__dict__[name] = function opt.name = function Options.Handler.ADD_COMMAND = ADD_COMMAND @feature('cc', 'cshlib', 'cprogram') @before('apply_core','exec_rule') def process_depends_on(self): '''The new depends_on attribute for build rules allow us to specify a dependency on output from a source generation rule''' if getattr(self , 'depends_on', None): lst = self.to_list(self.depends_on) for x in lst: y = self.bld.name_to_obj(x, self.env) self.bld.ASSERT(y is not None, "Failed to find dependency %s of %s" % (x, self.name)) y.post() if getattr(y, 'more_includes', None): self.includes += " " + y.more_includes os_path_relpath = getattr(os.path, 'relpath', None) if os_path_relpath is None: # Python < 2.6 does not have os.path.relpath, provide a replacement # (imported from Python2.6.5~rc2) def os_path_relpath(path, start): """Return a relative version of a path""" start_list = os.path.abspath(start).split("/") path_list = os.path.abspath(path).split("/") # Work out how much of the filepath is shared by 
start and path. i = len(os.path.commonprefix([start_list, path_list])) rel_list = ['..'] * (len(start_list)-i) + path_list[i:] if not rel_list: return start return os.path.join(*rel_list) def unique_list(seq): '''return a uniquified list in the same order as the existing list''' seen = {} result = [] for item in seq: if item in seen: continue seen[item] = True result.append(item) return result def TO_LIST(str, delimiter=None): '''Split a list, preserving quoted strings and existing lists''' if str is None: return [] if isinstance(str, list): return str if len(str) == 0: return [] lst = str.split(delimiter) # the string may have had quotes in it, now we # check if we did have quotes, and use the slower shlex # if we need to for e in lst: if e[0] == '"': return shlex.split(str) return lst def subst_vars_error(string, env): '''substitute vars, throw an error if a variable is not defined''' lst = re.split('(\$\{\w+\})', string) out = [] for v in lst: if re.match('\$\{\w+\}', v): vname = v[2:-1] if not vname in env: raise KeyError("Failed to find variable %s in %s" % (vname, string)) v = env[vname] out.append(v) return ''.join(out) @conf def SUBST_ENV_VAR(ctx, varname): '''Substitute an environment variable for any embedded variables''' return subst_vars_error(ctx.env[varname], ctx.env) Build.BuildContext.SUBST_ENV_VAR = SUBST_ENV_VAR def ENFORCE_GROUP_ORDERING(bld): '''enforce group ordering for the project. 
This makes the group ordering apply only when you specify a target with --target''' if Options.options.compile_targets: @feature('*') @before('exec_rule', 'apply_core', 'collect') def force_previous_groups(self): if getattr(self.bld, 'enforced_group_ordering', False): return self.bld.enforced_group_ordering = True def group_name(g): tm = self.bld.task_manager return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0] my_id = id(self) bld = self.bld stop = None for g in bld.task_manager.groups: for t in g.tasks_gen: if id(t) == my_id: stop = id(g) debug('group: Forcing up to group %s for target %s', group_name(g), self.name or self.target) break if stop is not None: break if stop is None: return for i in xrange(len(bld.task_manager.groups)): g = bld.task_manager.groups[i] bld.task_manager.current_group = i if id(g) == stop: break debug('group: Forcing group %s', group_name(g)) for t in g.tasks_gen: if not getattr(t, 'forced_groups', False): debug('group: Posting %s', t.name or t.target) t.forced_groups = True t.post() Build.BuildContext.ENFORCE_GROUP_ORDERING = ENFORCE_GROUP_ORDERING def recursive_dirlist(dir, relbase, pattern=None): '''recursive directory list''' ret = [] for f in os.listdir(dir): f2 = dir + '/' + f if os.path.isdir(f2): ret.extend(recursive_dirlist(f2, relbase)) else: if pattern and not fnmatch.fnmatch(f, pattern): continue ret.append(os_path_relpath(f2, relbase)) return ret def mkdir_p(dir): '''like mkdir -p''' if not dir: return if dir.endswith("/"): mkdir_p(dir[:-1]) return if os.path.isdir(dir): return mkdir_p(os.path.dirname(dir)) os.mkdir(dir) def SUBST_VARS_RECURSIVE(string, env): '''recursively expand variables''' if string is None: return string limit=100 while (string.find('${') != -1 and limit > 0): string = subst_vars_error(string, env) limit -= 1 return string @conf def EXPAND_VARIABLES(ctx, varstr, vars=None): '''expand variables from a user supplied dictionary This is most useful when you pass vars=locals() to expand 
all your local variables in strings ''' if isinstance(varstr, list): ret = [] for s in varstr: ret.append(EXPAND_VARIABLES(ctx, s, vars=vars)) return ret if not isinstance(varstr, str): return varstr import Environment env = Environment.Environment() ret = varstr # substitute on user supplied dict if avaiilable if vars is not None: for v in vars.keys(): env[v] = vars[v] ret = SUBST_VARS_RECURSIVE(ret, env) # if anything left, subst on the environment as well if ret.find('${') != -1: ret = SUBST_VARS_RECURSIVE(ret, ctx.env) # make sure there is nothing left. Also check for the common # typo of $( instead of ${ if ret.find('${') != -1 or ret.find('$(') != -1: Logs.error('Failed to substitute all variables in varstr=%s' % ret) sys.exit(1) return ret Build.BuildContext.EXPAND_VARIABLES = EXPAND_VARIABLES def RUN_COMMAND(cmd, env=None, shell=False): '''run a external command, return exit code or signal''' if env: cmd = SUBST_VARS_RECURSIVE(cmd, env) status = os.system(cmd) if os.WIFEXITED(status): return os.WEXITSTATUS(status) if os.WIFSIGNALED(status): return - os.WTERMSIG(status) Logs.error("Unknown exit reason %d for command: %s" (status, cmd)) return -1 # make sure we have md5. some systems don't have it try: from hashlib import md5 # Even if hashlib.md5 exists, it may be unusable. # Try to use MD5 function. In FIPS mode this will cause an exception # and we'll get to the replacement code foo = md5.md5('abcd') except: try: import md5 # repeat the same check here, mere success of import is not enough. # Try to use MD5 function. 
In FIPS mode this will cause an exception foo = md5.md5('abcd') except: import Constants Constants.SIG_NIL = hash('abcd') class replace_md5(object): def __init__(self): self.val = None def update(self, val): self.val = hash((self.val, val)) def digest(self): return str(self.val) def hexdigest(self): return self.digest().encode('hex') def replace_h_file(filename): f = open(filename, 'rb') m = replace_md5() while (filename): filename = f.read(100000) m.update(filename) f.close() return m.digest() Utils.md5 = replace_md5 Task.md5 = replace_md5 Utils.h_file = replace_h_file def LOAD_ENVIRONMENT(): '''load the configuration environment, allowing access to env vars from new commands''' import Environment env = Environment.Environment() try: env.load('.lock-wscript') env.load(env.blddir + '/c4che/default.cache.py') except: pass return env def IS_NEWER(bld, file1, file2): '''return True if file1 is newer than file2''' t1 = os.stat(os.path.join(bld.curdir, file1)).st_mtime t2 = os.stat(os.path.join(bld.curdir, file2)).st_mtime return t1 > t2 Build.BuildContext.IS_NEWER = IS_NEWER @conf def RECURSE(ctx, directory): '''recurse into a directory, relative to the curdir or top level''' try: visited_dirs = ctx.visited_dirs except: visited_dirs = ctx.visited_dirs = set() d = os.path.join(ctx.curdir, directory) if os.path.exists(d): abspath = os.path.abspath(d) else: abspath = os.path.abspath(os.path.join(Utils.g_module.srcdir, directory)) ctxclass = ctx.__class__.__name__ key = ctxclass + ':' + abspath if key in visited_dirs: # already done it return visited_dirs.add(key) relpath = os_path_relpath(abspath, ctx.curdir) if ctxclass == 'Handler': return ctx.sub_options(relpath) if ctxclass == 'ConfigurationContext': return ctx.sub_config(relpath) if ctxclass == 'BuildContext': return ctx.add_subdirs(relpath) Logs.error('Unknown RECURSE context class', ctxclass) raise Options.Handler.RECURSE = RECURSE Build.BuildContext.RECURSE = RECURSE def CHECK_MAKEFLAGS(bld): '''check for 
MAKEFLAGS environment variable in case we are being called from a Makefile try to honor a few make command line flags''' if not 'WAF_MAKE' in os.environ: return makeflags = os.environ.get('MAKEFLAGS') if makeflags is None: return jobs_set = False # we need to use shlex.split to cope with the escaping of spaces # in makeflags for opt in shlex.split(makeflags): # options can come either as -x or as x if opt[0:2] == 'V=': Options.options.verbose = Logs.verbose = int(opt[2:]) if Logs.verbose > 0: Logs.zones = ['runner'] if Logs.verbose > 2: Logs.zones = ['*'] elif opt[0].isupper() and opt.find('=') != -1: # this allows us to set waf options on the make command line # for example, if you do "make FOO=blah", then we set the # option 'FOO' in Options.options, to blah. If you look in wafsamba/wscript # you will see that the command line accessible options have their dest= # set to uppercase, to allow for passing of options from make in this way # this is also how "make test TESTS=testpattern" works, and # "make VERBOSE=1" as well as things like "make SYMBOLCHECK=1" loc = opt.find('=') setattr(Options.options, opt[0:loc], opt[loc+1:]) elif opt[0] != '-': for v in opt: if v == 'j': jobs_set = True elif v == 'k': Options.options.keep = True elif opt == '-j': jobs_set = True elif opt == '-k': Options.options.keep = True if not jobs_set: # default to one job Options.options.jobs = 1 Build.BuildContext.CHECK_MAKEFLAGS = CHECK_MAKEFLAGS option_groups = {} def option_group(opt, name): '''find or create an option group''' global option_groups if name in option_groups: return option_groups[name] gr = opt.add_option_group(name) option_groups[name] = gr return gr Options.Handler.option_group = option_group def save_file(filename, contents, create_dir=False): '''save data to a file''' if create_dir: mkdir_p(os.path.dirname(filename)) try: f = open(filename, 'w') f.write(contents) f.close() except: return False return True def load_file(filename): '''return contents of a file''' try: f 
= open(filename, 'r') r = f.read() f.close() except: return None return r def reconfigure(ctx): '''rerun configure if necessary''' import Configure, samba_wildcard, Scripting if not os.path.exists(".lock-wscript"): raise Utils.WafError('configure has not been run') bld = samba_wildcard.fake_build_environment() Configure.autoconfig = True Scripting.check_configured(bld) def map_shlib_extension(ctx, name, python=False): '''map a filename with a shared library extension of .so to the real shlib name''' if name is None: return None if name[-1:].isdigit(): # some libraries have specified versions in the wscript rule return name (root1, ext1) = os.path.splitext(name) if python: (root2, ext2) = os.path.splitext(ctx.env.pyext_PATTERN) else: (root2, ext2) = os.path.splitext(ctx.env.shlib_PATTERN) return root1+ext2 Build.BuildContext.map_shlib_extension = map_shlib_extension def apply_pattern(filename, pattern): '''apply a filename pattern to a filename that may have a directory component''' dirname = os.path.dirname(filename) if not dirname: return pattern % filename basename = os.path.basename(filename) return os.path.join(dirname, pattern % basename) def make_libname(ctx, name, nolibprefix=False, version=None, python=False): """make a library filename Options: nolibprefix: don't include the lib prefix version : add a version number python : if we should use python module name conventions""" if python: libname = apply_pattern(name, ctx.env.pyext_PATTERN) else: libname = apply_pattern(name, ctx.env.shlib_PATTERN) if nolibprefix and libname[0:3] == 'lib': libname = libname[3:] if version: if version[0] == '.': version = version[1:] (root, ext) = os.path.splitext(libname) if ext == ".dylib": # special case - version goes before the prefix libname = "%s.%s%s" % (root, version, ext) else: libname = "%s%s.%s" % (root, ext, version) return libname Build.BuildContext.make_libname = make_libname def get_tgt_list(bld): '''return a list of build objects for samba''' targets = 
LOCAL_CACHE(bld, 'TARGET_TYPE') # build a list of task generators we are interested in tgt_list = [] for tgt in targets: type = targets[tgt] if not type in ['SUBSYSTEM', 'MODULE', 'BINARY', 'LIBRARY', 'ASN1', 'PYTHON']: continue t = bld.name_to_obj(tgt, bld.env) if t is None: Logs.error("Target %s of type %s has no task generator" % (tgt, type)) sys.exit(1) tgt_list.append(t) return tgt_list from Constants import WSCRIPT_FILE def PROCESS_SEPARATE_RULE(self, rule): ''' cause waf to process additional script based on `rule'. You should have file named wscript__rule in the current directory where stage is either 'configure' or 'build' ''' ctxclass = self.__class__.__name__ stage = '' if ctxclass == 'ConfigurationContext': stage = 'configure' elif ctxclass == 'BuildContext': stage = 'build' file_path = os.path.join(self.curdir, WSCRIPT_FILE+'_'+stage+'_'+rule) txt = load_file(file_path) if txt: dc = {'ctx': self} if getattr(self.__class__, 'pre_recurse', None): dc = self.pre_recurse(txt, file_path, self.curdir) exec(compile(txt, file_path, 'exec'), dc) if getattr(self.__class__, 'post_recurse', None): dc = self.post_recurse(txt, file_path, self.curdir) Build.BuildContext.PROCESS_SEPARATE_RULE = PROCESS_SEPARATE_RULE ConfigurationContext.PROCESS_SEPARATE_RULE = PROCESS_SEPARATE_RULE def AD_DC_BUILD_IS_ENABLED(self): if self.CONFIG_SET('AD_DC_BUILD_IS_ENABLED'): return True return False Build.BuildContext.AD_DC_BUILD_IS_ENABLED = AD_DC_BUILD_IS_ENABLED ntdb-1.0/buildtools/wafsamba/samba_version.py000066400000000000000000000252431224151530700214520ustar00rootroot00000000000000import os import Utils import samba_utils import sys def bzr_version_summary(path): try: import bzrlib except ImportError: return ("BZR-UNKNOWN", {}) import bzrlib.ui bzrlib.ui.ui_factory = bzrlib.ui.make_ui_for_terminal( sys.stdin, sys.stdout, sys.stderr) from bzrlib import branch, osutils, workingtree from bzrlib.plugin import load_plugins load_plugins() b = branch.Branch.open(path) (revno, revid) 
= b.last_revision_info() rev = b.repository.get_revision(revid) fields = { "BZR_REVISION_ID": revid, "BZR_REVNO": revno, "COMMIT_DATE": osutils.format_date_with_offset_in_original_timezone(rev.timestamp, rev.timezone or 0), "COMMIT_TIME": int(rev.timestamp), "BZR_BRANCH": rev.properties.get("branch-nick", ""), } # If possible, retrieve the git sha try: from bzrlib.plugins.git.object_store import get_object_store except ImportError: # No git plugin ret = "BZR-%d" % revno else: store = get_object_store(b.repository) store.lock_read() try: full_rev = store._lookup_revision_sha1(revid) finally: store.unlock() fields["GIT_COMMIT_ABBREV"] = full_rev[:7] fields["GIT_COMMIT_FULLREV"] = full_rev ret = "GIT-" + fields["GIT_COMMIT_ABBREV"] if workingtree.WorkingTree.open(path).has_changes(): fields["COMMIT_IS_CLEAN"] = 0 ret += "+" else: fields["COMMIT_IS_CLEAN"] = 1 return (ret, fields) def git_version_summary(path, env=None): # Get version from GIT if not 'GIT' in env and os.path.exists("/usr/bin/git"): # this is useful when doing make dist without configuring env.GIT = "/usr/bin/git" if not 'GIT' in env: return ("GIT-UNKNOWN", {}) environ = dict(os.environ) environ["GIT_DIR"] = '%s/.git' % path environ["GIT_WORK_TREE"] = path git = Utils.cmd_output(env.GIT + ' show --pretty=format:"%h%n%ct%n%H%n%cd" --stat HEAD', silent=True, env=environ) lines = git.splitlines() if not lines or len(lines) < 4: return ("GIT-UNKNOWN", {}) fields = { "GIT_COMMIT_ABBREV": lines[0], "GIT_COMMIT_FULLREV": lines[2], "COMMIT_TIME": int(lines[1]), "COMMIT_DATE": lines[3], } ret = "GIT-" + fields["GIT_COMMIT_ABBREV"] if env.GIT_LOCAL_CHANGES: clean = Utils.cmd_output('%s diff HEAD | wc -l' % env.GIT, silent=True).strip() if clean == "0": fields["COMMIT_IS_CLEAN"] = 1 else: fields["COMMIT_IS_CLEAN"] = 0 ret += "+" return (ret, fields) def distversion_version_summary(path): #get version from .distversion file f = open(path + '/.distversion', 'r') suffix = None fields = {} for line in f: line = 
line.strip() if line == '': continue if line.startswith("#"): continue try: split_line = line.split("=") if split_line[1] != "": key = split_line[0] value = split_line[1] if key == "SUFFIX": suffix = value continue fields[key] = value except: print("Failed to parse line %s from .distversion file." % (line)) raise f.close() if "COMMIT_TIME" in fields: fields["COMMIT_TIME"] = int(fields["COMMIT_TIME"]) if suffix is None: return ("UNKNOWN", fields) return (suffix, fields) class SambaVersion(object): def __init__(self, version_dict, path, env=None, is_install=True): '''Determine the version number of samba See VERSION for the format. Entries on that file are also accepted as dictionary entries here ''' self.MAJOR=None self.MINOR=None self.RELEASE=None self.REVISION=None self.TP_RELEASE=None self.ALPHA_RELEASE=None self.BETA_RELEASE=None self.PRE_RELEASE=None self.RC_RELEASE=None self.IS_SNAPSHOT=True self.RELEASE_NICKNAME=None self.VENDOR_SUFFIX=None self.VENDOR_PATCH=None for a, b in version_dict.iteritems(): if a.startswith("SAMBA_VERSION_"): setattr(self, a[14:], b) else: setattr(self, a, b) if self.IS_GIT_SNAPSHOT == "yes": self.IS_SNAPSHOT=True elif self.IS_GIT_SNAPSHOT == "no": self.IS_SNAPSHOT=False else: raise Exception("Unknown value for IS_GIT_SNAPSHOT: %s" % self.IS_GIT_SNAPSHOT) ## ## start with "3.0.22" ## self.MAJOR=int(self.MAJOR) self.MINOR=int(self.MINOR) self.RELEASE=int(self.RELEASE) SAMBA_VERSION_STRING = ("%u.%u.%u" % (self.MAJOR, self.MINOR, self.RELEASE)) ## ## maybe add "3.0.22a" or "4.0.0tp11" or "4.0.0alpha1" or "4.0.0beta1" or "3.0.22pre1" or "3.0.22rc1" ## We do not do pre or rc version on patch/letter releases ## if self.REVISION is not None: SAMBA_VERSION_STRING += self.REVISION if self.TP_RELEASE is not None: self.TP_RELEASE = int(self.TP_RELEASE) SAMBA_VERSION_STRING += "tp%u" % self.TP_RELEASE if self.ALPHA_RELEASE is not None: self.ALPHA_RELEASE = int(self.ALPHA_RELEASE) SAMBA_VERSION_STRING += ("alpha%u" % self.ALPHA_RELEASE) if 
self.BETA_RELEASE is not None: self.BETA_RELEASE = int(self.BETA_RELEASE) SAMBA_VERSION_STRING += ("beta%u" % self.BETA_RELEASE) if self.PRE_RELEASE is not None: self.PRE_RELEASE = int(self.PRE_RELEASE) SAMBA_VERSION_STRING += ("pre%u" % self.PRE_RELEASE) if self.RC_RELEASE is not None: self.RC_RELEASE = int(self.RC_RELEASE) SAMBA_VERSION_STRING += ("rc%u" % self.RC_RELEASE) if self.IS_SNAPSHOT: if not is_install: suffix = "DEVELOPERBUILD" self.vcs_fields = {} elif os.path.exists(os.path.join(path, ".git")): suffix, self.vcs_fields = git_version_summary(path, env=env) elif os.path.exists(os.path.join(path, ".bzr")): suffix, self.vcs_fields = bzr_version_summary(path) elif os.path.exists(os.path.join(path, ".distversion")): suffix, self.vcs_fields = distversion_version_summary(path) else: suffix = "UNKNOWN" self.vcs_fields = {} self.vcs_fields["SUFFIX"] = suffix SAMBA_VERSION_STRING += "-" + suffix else: self.vcs_fields = {} self.OFFICIAL_STRING = SAMBA_VERSION_STRING if self.VENDOR_SUFFIX is not None: SAMBA_VERSION_STRING += ("-" + self.VENDOR_SUFFIX) self.VENDOR_SUFFIX = self.VENDOR_SUFFIX if self.VENDOR_PATCH is not None: SAMBA_VERSION_STRING += ("-" + self.VENDOR_PATCH) self.VENDOR_PATCH = self.VENDOR_PATCH self.STRING = SAMBA_VERSION_STRING if self.RELEASE_NICKNAME is not None: self.STRING_WITH_NICKNAME = "%s (%s)" % (self.STRING, self.RELEASE_NICKNAME) else: self.STRING_WITH_NICKNAME = self.STRING def __str__(self): string="/* Autogenerated by waf */\n" string+="#define SAMBA_VERSION_MAJOR %u\n" % self.MAJOR string+="#define SAMBA_VERSION_MINOR %u\n" % self.MINOR string+="#define SAMBA_VERSION_RELEASE %u\n" % self.RELEASE if self.REVISION is not None: string+="#define SAMBA_VERSION_REVISION %u\n" % self.REVISION if self.TP_RELEASE is not None: string+="#define SAMBA_VERSION_TP_RELEASE %u\n" % self.TP_RELEASE if self.ALPHA_RELEASE is not None: string+="#define SAMBA_VERSION_ALPHA_RELEASE %u\n" % self.ALPHA_RELEASE if self.BETA_RELEASE is not None: 
string+="#define SAMBA_VERSION_BETA_RELEASE %u\n" % self.BETA_RELEASE if self.PRE_RELEASE is not None: string+="#define SAMBA_VERSION_PRE_RELEASE %u\n" % self.PRE_RELEASE if self.RC_RELEASE is not None: string+="#define SAMBA_VERSION_RC_RELEASE %u\n" % self.RC_RELEASE for name in sorted(self.vcs_fields.keys()): string+="#define SAMBA_VERSION_%s " % name value = self.vcs_fields[name] if isinstance(value, basestring): string += "\"%s\"" % value elif type(value) is int: string += "%d" % value else: raise Exception("Unknown type for %s: %r" % (name, value)) string += "\n" string+="#define SAMBA_VERSION_OFFICIAL_STRING \"" + self.OFFICIAL_STRING + "\"\n" if self.VENDOR_SUFFIX is not None: string+="#define SAMBA_VERSION_VENDOR_SUFFIX " + self.VENDOR_SUFFIX + "\n" if self.VENDOR_PATCH is not None: string+="#define SAMBA_VERSION_VENDOR_PATCH " + self.VENDOR_PATCH + "\n" if self.RELEASE_NICKNAME is not None: string+="#define SAMBA_VERSION_RELEASE_NICKNAME " + self.RELEASE_NICKNAME + "\n" # We need to put this #ifdef in to the headers so that vendors can override the version with a function string+=''' #ifdef SAMBA_VERSION_VENDOR_FUNCTION # define SAMBA_VERSION_STRING SAMBA_VERSION_VENDOR_FUNCTION #else /* SAMBA_VERSION_VENDOR_FUNCTION */ # define SAMBA_VERSION_STRING "''' + self.STRING_WITH_NICKNAME + '''" #endif ''' string+="/* Version for mkrelease.sh: \nSAMBA_VERSION_STRING=" + self.STRING_WITH_NICKNAME + "\n */\n" return string def samba_version_file(version_file, path, env=None, is_install=True): '''Parse the version information from a VERSION file''' f = open(version_file, 'r') version_dict = {} for line in f: line = line.strip() if line == '': continue if line.startswith("#"): continue try: split_line = line.split("=") if split_line[1] != "": value = split_line[1].strip('"') version_dict[split_line[0]] = value except: print("Failed to parse line %s from %s" % (line, version_file)) raise return SambaVersion(version_dict, path, env=env, is_install=is_install) def 
load_version(env=None, is_install=True): '''load samba versions either from ./VERSION or git return a version object for detailed breakdown''' if not env: env = samba_utils.LOAD_ENVIRONMENT() version = samba_version_file("./VERSION", ".", env, is_install=is_install) Utils.g_module.VERSION = version.STRING return version ntdb-1.0/buildtools/wafsamba/samba_wildcard.py000066400000000000000000000106661224151530700215610ustar00rootroot00000000000000# based on playground/evil in the waf svn tree import os, datetime import Scripting, Utils, Options, Logs, Environment, fnmatch from Constants import * from samba_utils import * def run_task(t, k): '''run a single build task''' ret = t.run() if ret: raise Utils.WafError("Failed to build %s: %u" % (k, ret)) def run_named_build_task(cmd): '''run a named build task, matching the cmd name using fnmatch wildcards against inputs and outputs of all build tasks''' bld = fake_build_environment(info=False) found = False cwd_node = bld.root.find_dir(os.getcwd()) top_node = bld.root.find_dir(bld.srcnode.abspath()) cmd = os.path.normpath(cmd) # cope with builds of bin/*/* if os.path.islink(cmd): cmd = os_path_relpath(os.readlink(cmd), os.getcwd()) if cmd[0:12] == "bin/default/": cmd = cmd[12:] for g in bld.task_manager.groups: for attr in ['outputs', 'inputs']: for t in g.tasks: s = getattr(t, attr, []) for k in s: relpath1 = k.relpath_gen(cwd_node) relpath2 = k.relpath_gen(top_node) if (fnmatch.fnmatch(relpath1, cmd) or fnmatch.fnmatch(relpath2, cmd)): t.position = [0,0] print(t.display()) run_task(t, k) found = True if not found: raise Utils.WafError("Unable to find build target matching %s" % cmd) def rewrite_compile_targets(): '''cope with the bin/ form of compile target''' if not Options.options.compile_targets: return bld = fake_build_environment(info=False) targets = LOCAL_CACHE(bld, 'TARGET_TYPE') tlist = [] for t in Options.options.compile_targets.split(','): if not os.path.islink(t): tlist.append(t) continue link = 
os.readlink(t) list = link.split('/') for name in [list[-1], '/'.join(list[-2:])]: if name in targets: tlist.append(name) continue Options.options.compile_targets = ",".join(tlist) def wildcard_main(missing_cmd_fn): '''this replaces main from Scripting, allowing us to override the behaviour for unknown commands If a unknown command is found, then missing_cmd_fn() is called with the name of the requested command ''' Scripting.commands = Options.arg_line[:] # rewrite the compile targets to cope with the bin/xx form rewrite_compile_targets() while Scripting.commands: x = Scripting.commands.pop(0) ini = datetime.datetime.now() if x == 'configure': fun = Scripting.configure elif x == 'build': fun = Scripting.build else: fun = getattr(Utils.g_module, x, None) # this is the new addition on top of main from Scripting.py if not fun: missing_cmd_fn(x) break ctx = getattr(Utils.g_module, x + '_context', Utils.Context)() if x in ['init', 'shutdown', 'dist', 'distclean', 'distcheck']: try: fun(ctx) except TypeError: fun() else: fun(ctx) ela = '' if not Options.options.progress_bar: ela = ' (%s)' % Utils.get_elapsed_time(ini) if x != 'init' and x != 'shutdown': Logs.info('%r finished successfully%s' % (x, ela)) if not Scripting.commands and x != 'shutdown': Scripting.commands.append('shutdown') def fake_build_environment(info=True, flush=False): """create all the tasks for the project, but do not run the build return the build context in use""" bld = getattr(Utils.g_module, 'build_context', Utils.Context)() bld = Scripting.check_configured(bld) Options.commands['install'] = False Options.commands['uninstall'] = False Options.is_install = False bld.is_install = 0 # False try: proj = Environment.Environment(Options.lockfile) except IOError: raise Utils.WafError("Project not configured (run 'waf configure' first)") bld.load_dirs(proj[SRCDIR], proj[BLDDIR]) bld.load_envs() if info: Logs.info("Waf: Entering directory `%s'" % bld.bldnode.abspath()) 
bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]]) bld.pre_build() if flush: bld.flush() return bld ntdb-1.0/buildtools/wafsamba/stale_files.py000066400000000000000000000076401224151530700211150ustar00rootroot00000000000000# encoding: utf-8 # Thomas Nagy, 2006-2010 (ita) """ Add a pre-build hook to remove all build files which do not have a corresponding target This can be used for example to remove the targets that have changed name without performing a full 'waf clean' Of course, it will only work if there are no dynamically generated nodes/tasks, in which case the method will have to be modified to exclude some folders for example. """ import Logs, Build, os, samba_utils, Options, Utils from Runner import Parallel old_refill_task_list = Parallel.refill_task_list def replace_refill_task_list(self): '''replacement for refill_task_list() that deletes stale files''' iit = old_refill_task_list(self) bld = self.bld if not getattr(bld, 'new_rules', False): # we only need to check for stale files if the build rules changed return iit if Options.options.compile_targets: # not safe when --target is used return iit # execute only once if getattr(self, 'cleanup_done', False): return iit self.cleanup_done = True def group_name(g): tm = self.bld.task_manager return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0] bin_base = bld.bldnode.abspath() bin_base_len = len(bin_base) # paranoia if bin_base[-4:] != '/bin': raise Utils.WafError("Invalid bin base: %s" % bin_base) # obtain the expected list of files expected = [] for i in range(len(bld.task_manager.groups)): g = bld.task_manager.groups[i] tasks = g.tasks_gen for x in tasks: try: if getattr(x, 'target'): tlist = samba_utils.TO_LIST(getattr(x, 'target')) ttype = getattr(x, 'samba_type', None) task_list = getattr(x, 'compiled_tasks', []) if task_list: # this gets all of the .o files, including the task # ids, so foo.c maps to foo_3.o for idx=3 for tsk in task_list: for output in tsk.outputs: 
objpath = os.path.normpath(output.abspath(bld.env)) expected.append(objpath) for t in tlist: if ttype in ['LIBRARY','MODULE']: t = samba_utils.apply_pattern(t, bld.env.shlib_PATTERN) if ttype == 'PYTHON': t = samba_utils.apply_pattern(t, bld.env.pyext_PATTERN) p = os.path.join(x.path.abspath(bld.env), t) p = os.path.normpath(p) expected.append(p) for n in x.allnodes: p = n.abspath(bld.env) if p[0:bin_base_len] == bin_base: expected.append(p) except: pass for root, dirs, files in os.walk(bin_base): for f in files: p = root + '/' + f if os.path.islink(p): link = os.readlink(p) if link[0:bin_base_len] == bin_base: p = link if f in ['config.h']: continue (froot, fext) = os.path.splitext(f) if fext not in [ '.c', '.h', '.so', '.o' ]: continue if f[-7:] == '.inst.h': continue if p.find("/.conf") != -1: continue if not p in expected and os.path.exists(p): Logs.warn("Removing stale file: %s" % p) os.unlink(p) return iit def AUTOCLEANUP_STALE_FILES(bld): """automatically clean up any files in bin that shouldn't be there""" old_refill_task_list = Parallel.refill_task_list Parallel.refill_task_list = replace_refill_task_list Parallel.bld = bld Build.BuildContext.AUTOCLEANUP_STALE_FILES = AUTOCLEANUP_STALE_FILES ntdb-1.0/buildtools/wafsamba/symbols.py000066400000000000000000000530641224151530700203140ustar00rootroot00000000000000# a waf tool to extract symbols from object files or libraries # using nm, producing a set of exposed defined/undefined symbols import Utils, Build, subprocess, Logs, re from samba_wildcard import fake_build_environment from samba_utils import * # these are the data structures used in symbols.py: # # bld.env.symbol_map : dictionary mapping public symbol names to list of # subsystem names where that symbol exists # # t.in_library : list of libraries that t is in # # bld.env.public_symbols: set of public symbols for each subsystem # bld.env.used_symbols : set of used symbols for each subsystem # # bld.env.syslib_symbols: dictionary mapping system library 
name to set of symbols # for that library # bld.env.library_dict : dictionary mapping built library paths to subsystem names # # LOCAL_CACHE(bld, 'TARGET_TYPE') : dictionary mapping subsystem name to target type def symbols_extract(bld, objfiles, dynamic=False): '''extract symbols from objfile, returning a dictionary containing the set of undefined and public symbols for each file''' ret = {} # see if we can get some results from the nm cache if not bld.env.nm_cache: bld.env.nm_cache = {} objfiles = set(objfiles).copy() remaining = set() for obj in objfiles: if obj in bld.env.nm_cache: ret[obj] = bld.env.nm_cache[obj].copy() else: remaining.add(obj) objfiles = remaining if len(objfiles) == 0: return ret cmd = ["nm"] if dynamic: # needed for some .so files cmd.append("-D") cmd.extend(list(objfiles)) nmpipe = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout if len(objfiles) == 1: filename = list(objfiles)[0] ret[filename] = { "PUBLIC": set(), "UNDEFINED" : set()} for line in nmpipe: line = line.strip() if line.endswith(':'): filename = line[:-1] ret[filename] = { "PUBLIC": set(), "UNDEFINED" : set() } continue cols = line.split(" ") if cols == ['']: continue # see if the line starts with an address if len(cols) == 3: symbol_type = cols[1] symbol = cols[2] else: symbol_type = cols[0] symbol = cols[1] if symbol_type in "BDGTRVWSi": # its a public symbol ret[filename]["PUBLIC"].add(symbol) elif symbol_type in "U": ret[filename]["UNDEFINED"].add(symbol) # add to the cache for obj in objfiles: if obj in ret: bld.env.nm_cache[obj] = ret[obj].copy() else: bld.env.nm_cache[obj] = { "PUBLIC": set(), "UNDEFINED" : set() } return ret def real_name(name): if name.find(".objlist") != -1: name = name[:-8] return name def find_ldd_path(bld, libname, binary): '''find the path to the syslib we will link against''' ret = None if not bld.env.syslib_paths: bld.env.syslib_paths = {} if libname in bld.env.syslib_paths: return bld.env.syslib_paths[libname] lddpipe = 
subprocess.Popen(['ldd', binary], stdout=subprocess.PIPE).stdout for line in lddpipe: line = line.strip() cols = line.split(" ") if len(cols) < 3 or cols[1] != "=>": continue if cols[0].startswith("libc."): # save this one too bld.env.libc_path = cols[2] if cols[0].startswith(libname): ret = cols[2] bld.env.syslib_paths[libname] = ret return ret # some regular expressions for parsing readelf output re_sharedlib = re.compile('Shared library: \[(.*)\]') re_rpath = re.compile('Library rpath: \[(.*)\]') def get_libs(bld, binname): '''find the list of linked libraries for any binary or library binname is the path to the binary/library on disk We do this using readelf instead of ldd as we need to avoid recursing into system libraries ''' # see if we can get the result from the ldd cache if not bld.env.lib_cache: bld.env.lib_cache = {} if binname in bld.env.lib_cache: return bld.env.lib_cache[binname].copy() rpath = [] libs = set() elfpipe = subprocess.Popen(['readelf', '--dynamic', binname], stdout=subprocess.PIPE).stdout for line in elfpipe: m = re_sharedlib.search(line) if m: libs.add(m.group(1)) m = re_rpath.search(line) if m: rpath.extend(m.group(1).split(":")) ret = set() for lib in libs: found = False for r in rpath: path = os.path.join(r, lib) if os.path.exists(path): ret.add(os.path.realpath(path)) found = True break if not found: # we didn't find this lib using rpath. It is probably a system # library, so to find the path to it we either need to use ldd # or we need to start parsing /etc/ld.so.conf* ourselves. We'll # use ldd for now, even though it is slow path = find_ldd_path(bld, lib, binname) if path: ret.add(os.path.realpath(path)) bld.env.lib_cache[binname] = ret.copy() return ret def get_libs_recursive(bld, binname, seen): '''find the recursive list of linked libraries for any binary or library binname is the path to the binary/library on disk. 
seen is a set used to prevent loops ''' if binname in seen: return set() ret = get_libs(bld, binname) seen.add(binname) for lib in ret: # we don't want to recurse into system libraries. If a system # library that we use (eg. libcups) happens to use another library # (such as libkrb5) which contains common symbols with our own # libraries, then that is not an error if lib in bld.env.library_dict: ret = ret.union(get_libs_recursive(bld, lib, seen)) return ret def find_syslib_path(bld, libname, deps): '''find the path to the syslib we will link against''' # the strategy is to use the targets that depend on the library, and run ldd # on it to find the real location of the library that is used linkpath = deps[0].link_task.outputs[0].abspath(bld.env) if libname == "python": libname += bld.env.PYTHON_VERSION return find_ldd_path(bld, "lib%s" % libname.lower(), linkpath) def build_symbol_sets(bld, tgt_list): '''build the public_symbols and undefined_symbols attributes for each target''' if bld.env.public_symbols: return objlist = [] # list of object file objmap = {} # map from object filename to target (subsystem) name for t in tgt_list: t.public_symbols = set() t.undefined_symbols = set() t.used_symbols = set() for tsk in getattr(t, 'compiled_tasks', []): for output in tsk.outputs: objpath = output.abspath(bld.env) objlist.append(objpath) objmap[objpath] = t symbols = symbols_extract(bld, objlist) for obj in objlist: t = objmap[obj] t.public_symbols = t.public_symbols.union(symbols[obj]["PUBLIC"]) t.undefined_symbols = t.undefined_symbols.union(symbols[obj]["UNDEFINED"]) t.used_symbols = t.used_symbols.union(symbols[obj]["UNDEFINED"]) t.undefined_symbols = t.undefined_symbols.difference(t.public_symbols) # and the reverse map of public symbols to subsystem name bld.env.symbol_map = {} for t in tgt_list: for s in t.public_symbols: if not s in bld.env.symbol_map: bld.env.symbol_map[s] = [] bld.env.symbol_map[s].append(real_name(t.sname)) targets = LOCAL_CACHE(bld, 
'TARGET_TYPE') bld.env.public_symbols = {} for t in tgt_list: name = real_name(t.sname) if name in bld.env.public_symbols: bld.env.public_symbols[name] = bld.env.public_symbols[name].union(t.public_symbols) else: bld.env.public_symbols[name] = t.public_symbols if t.samba_type == 'LIBRARY': for dep in t.add_objects: t2 = bld.name_to_obj(dep, bld.env) bld.ASSERT(t2 is not None, "Library '%s' has unknown dependency '%s'" % (name, dep)) bld.env.public_symbols[name] = bld.env.public_symbols[name].union(t2.public_symbols) bld.env.used_symbols = {} for t in tgt_list: name = real_name(t.sname) if name in bld.env.used_symbols: bld.env.used_symbols[name] = bld.env.used_symbols[name].union(t.used_symbols) else: bld.env.used_symbols[name] = t.used_symbols if t.samba_type == 'LIBRARY': for dep in t.add_objects: t2 = bld.name_to_obj(dep, bld.env) bld.ASSERT(t2 is not None, "Library '%s' has unknown dependency '%s'" % (name, dep)) bld.env.used_symbols[name] = bld.env.used_symbols[name].union(t2.used_symbols) def build_library_dict(bld, tgt_list): '''build the library_dict dictionary''' if bld.env.library_dict: return bld.env.library_dict = {} for t in tgt_list: if t.samba_type in [ 'LIBRARY', 'PYTHON' ]: linkpath = os.path.realpath(t.link_task.outputs[0].abspath(bld.env)) bld.env.library_dict[linkpath] = t.sname def build_syslib_sets(bld, tgt_list): '''build the public_symbols for all syslibs''' if bld.env.syslib_symbols: return # work out what syslibs we depend on, and what targets those are used in syslibs = {} objmap = {} for t in tgt_list: if getattr(t, 'uselib', []) and t.samba_type in [ 'LIBRARY', 'BINARY', 'PYTHON' ]: for lib in t.uselib: if lib in ['PYEMBED', 'PYEXT']: lib = "python" if not lib in syslibs: syslibs[lib] = [] syslibs[lib].append(t) # work out the paths to each syslib syslib_paths = [] for lib in syslibs: path = find_syslib_path(bld, lib, syslibs[lib]) if path is None: Logs.warn("Unable to find syslib path for %s" % lib) if path is not None: 
syslib_paths.append(path) objmap[path] = lib.lower() # add in libc syslib_paths.append(bld.env.libc_path) objmap[bld.env.libc_path] = 'c' symbols = symbols_extract(bld, syslib_paths, dynamic=True) # keep a map of syslib names to public symbols bld.env.syslib_symbols = {} for lib in symbols: bld.env.syslib_symbols[lib] = symbols[lib]["PUBLIC"] # add to the map of symbols to dependencies for lib in symbols: for sym in symbols[lib]["PUBLIC"]: if not sym in bld.env.symbol_map: bld.env.symbol_map[sym] = [] bld.env.symbol_map[sym].append(objmap[lib]) # keep the libc symbols as well, as these are useful for some of the # sanity checks bld.env.libc_symbols = symbols[bld.env.libc_path]["PUBLIC"] # add to the combined map of dependency name to public_symbols for lib in bld.env.syslib_symbols: bld.env.public_symbols[objmap[lib]] = bld.env.syslib_symbols[lib] def build_autodeps(bld, t): '''build the set of dependencies for a target''' deps = set() name = real_name(t.sname) targets = LOCAL_CACHE(bld, 'TARGET_TYPE') for sym in t.undefined_symbols: if sym in t.public_symbols: continue if sym in bld.env.symbol_map: depname = bld.env.symbol_map[sym] if depname == [ name ]: # self dependencies aren't interesting continue if t.in_library == depname: # no need to depend on the library we are part of continue if depname[0] in ['c', 'python']: # these don't go into autodeps continue if targets[depname[0]] in [ 'SYSLIB' ]: deps.add(depname[0]) continue t2 = bld.name_to_obj(depname[0], bld.env) if len(t2.in_library) != 1: deps.add(depname[0]) continue if t2.in_library == t.in_library: # if we're part of the same library, we don't need to autodep continue deps.add(t2.in_library[0]) t.autodeps = deps def build_library_names(bld, tgt_list): '''add a in_library attribute to all targets that are part of a library''' if bld.env.done_build_library_names: return for t in tgt_list: t.in_library = [] for t in tgt_list: if t.samba_type in [ 'LIBRARY' ]: for obj in t.samba_deps_extended: t2 = 
bld.name_to_obj(obj, bld.env) if t2 and t2.samba_type in [ 'SUBSYSTEM', 'ASN1' ]: if not t.sname in t2.in_library: t2.in_library.append(t.sname) bld.env.done_build_library_names = True def check_library_deps(bld, t): '''check that all the autodeps that have mutual dependency of this target are in the same library as the target''' name = real_name(t.sname) if len(t.in_library) > 1: Logs.warn("WARNING: Target '%s' in multiple libraries: %s" % (t.sname, t.in_library)) for dep in t.autodeps: t2 = bld.name_to_obj(dep, bld.env) if t2 is None: continue for dep2 in t2.autodeps: if dep2 == name and t.in_library != t2.in_library: Logs.warn("WARNING: mutual dependency %s <=> %s" % (name, real_name(t2.sname))) Logs.warn("Libraries should match. %s != %s" % (t.in_library, t2.in_library)) # raise Utils.WafError("illegal mutual dependency") def check_syslib_collisions(bld, tgt_list): '''check if a target has any symbol collisions with a syslib We do not want any code in Samba to use a symbol name from a system library. The chance of that causing problems is just too high. 
Note that libreplace uses a rep_XX approach of renaming symbols via macros ''' has_error = False for t in tgt_list: for lib in bld.env.syslib_symbols: common = t.public_symbols.intersection(bld.env.syslib_symbols[lib]) if common: Logs.error("ERROR: Target '%s' has symbols '%s' which is also in syslib '%s'" % (t.sname, common, lib)) has_error = True if has_error: raise Utils.WafError("symbols in common with system libraries") def check_dependencies(bld, t): '''check for depenencies that should be changed''' if bld.name_to_obj(t.sname + ".objlist", bld.env): return targets = LOCAL_CACHE(bld, 'TARGET_TYPE') remaining = t.undefined_symbols.copy() remaining = remaining.difference(t.public_symbols) sname = real_name(t.sname) deps = set(t.samba_deps) for d in t.samba_deps: if targets[d] in [ 'EMPTY', 'DISABLED', 'SYSLIB', 'GENERATOR' ]: continue bld.ASSERT(d in bld.env.public_symbols, "Failed to find symbol list for dependency '%s'" % d) diff = remaining.intersection(bld.env.public_symbols[d]) if not diff and targets[sname] != 'LIBRARY': Logs.info("Target '%s' has no dependency on %s" % (sname, d)) else: remaining = remaining.difference(diff) t.unsatisfied_symbols = set() needed = {} for sym in remaining: if sym in bld.env.symbol_map: dep = bld.env.symbol_map[sym] if not dep[0] in needed: needed[dep[0]] = set() needed[dep[0]].add(sym) else: t.unsatisfied_symbols.add(sym) for dep in needed: Logs.info("Target '%s' should add dep '%s' for symbols %s" % (sname, dep, " ".join(needed[dep]))) def check_syslib_dependencies(bld, t): '''check for syslib depenencies''' if bld.name_to_obj(t.sname + ".objlist", bld.env): return sname = real_name(t.sname) remaining = set() features = TO_LIST(t.features) if 'pyembed' in features or 'pyext' in features: if 'python' in bld.env.public_symbols: t.unsatisfied_symbols = t.unsatisfied_symbols.difference(bld.env.public_symbols['python']) needed = {} for sym in t.unsatisfied_symbols: if sym in bld.env.symbol_map: dep = bld.env.symbol_map[sym][0] 
if dep == 'c': continue if not dep in needed: needed[dep] = set() needed[dep].add(sym) else: remaining.add(sym) for dep in needed: Logs.info("Target '%s' should add syslib dep '%s' for symbols %s" % (sname, dep, " ".join(needed[dep]))) if remaining: debug("deps: Target '%s' has unsatisfied symbols: %s" % (sname, " ".join(remaining))) def symbols_symbolcheck(task): '''check the internal dependency lists''' bld = task.env.bld tgt_list = get_tgt_list(bld) build_symbol_sets(bld, tgt_list) build_library_names(bld, tgt_list) for t in tgt_list: t.autodeps = set() if getattr(t, 'source', ''): build_autodeps(bld, t) for t in tgt_list: check_dependencies(bld, t) for t in tgt_list: check_library_deps(bld, t) def symbols_syslibcheck(task): '''check the syslib dependencies''' bld = task.env.bld tgt_list = get_tgt_list(bld) build_syslib_sets(bld, tgt_list) check_syslib_collisions(bld, tgt_list) for t in tgt_list: check_syslib_dependencies(bld, t) def symbols_whyneeded(task): """check why 'target' needs to link to 'subsystem'""" bld = task.env.bld tgt_list = get_tgt_list(bld) why = Options.options.WHYNEEDED.split(":") if len(why) != 2: raise Utils.WafError("usage: WHYNEEDED=TARGET:DEPENDENCY") target = why[0] subsystem = why[1] build_symbol_sets(bld, tgt_list) build_library_names(bld, tgt_list) build_syslib_sets(bld, tgt_list) Logs.info("Checking why %s needs to link to %s" % (target, subsystem)) if not target in bld.env.used_symbols: Logs.warn("unable to find target '%s' in used_symbols dict" % target) return if not subsystem in bld.env.public_symbols: Logs.warn("unable to find subsystem '%s' in public_symbols dict" % subsystem) return overlap = bld.env.used_symbols[target].intersection(bld.env.public_symbols[subsystem]) if not overlap: Logs.info("target '%s' doesn't use any public symbols from '%s'" % (target, subsystem)) else: Logs.info("target '%s' uses symbols %s from '%s'" % (target, overlap, subsystem)) def report_duplicate(bld, binname, sym, libs, fail_on_error): 
'''report duplicated symbols''' if sym in ['_init', '_fini', '_edata', '_end', '__bss_start']: return libnames = [] for lib in libs: if lib in bld.env.library_dict: libnames.append(bld.env.library_dict[lib]) else: libnames.append(lib) if fail_on_error: raise Utils.WafError("%s: Symbol %s linked in multiple libraries %s" % (binname, sym, libnames)) else: print("%s: Symbol %s linked in multiple libraries %s" % (binname, sym, libnames)) def symbols_dupcheck_binary(bld, binname, fail_on_error): '''check for duplicated symbols in one binary''' libs = get_libs_recursive(bld, binname, set()) symlist = symbols_extract(bld, libs, dynamic=True) symmap = {} for libpath in symlist: for sym in symlist[libpath]['PUBLIC']: if sym == '_GLOBAL_OFFSET_TABLE_': continue if not sym in symmap: symmap[sym] = set() symmap[sym].add(libpath) for sym in symmap: if len(symmap[sym]) > 1: for libpath in symmap[sym]: if libpath in bld.env.library_dict: report_duplicate(bld, binname, sym, symmap[sym], fail_on_error) break def symbols_dupcheck(task, fail_on_error=False): '''check for symbols defined in two different subsystems''' bld = task.env.bld tgt_list = get_tgt_list(bld) targets = LOCAL_CACHE(bld, 'TARGET_TYPE') build_library_dict(bld, tgt_list) for t in tgt_list: if t.samba_type == 'BINARY': binname = os_path_relpath(t.link_task.outputs[0].abspath(bld.env), os.getcwd()) symbols_dupcheck_binary(bld, binname, fail_on_error) def symbols_dupcheck_fatal(task): '''check for symbols defined in two different subsystems (and fail if duplicates are found)''' symbols_dupcheck(task, fail_on_error=True) def SYMBOL_CHECK(bld): '''check our dependency lists''' if Options.options.SYMBOLCHECK: bld.SET_BUILD_GROUP('symbolcheck') task = bld(rule=symbols_symbolcheck, always=True, name='symbol checking') task.env.bld = bld bld.SET_BUILD_GROUP('syslibcheck') task = bld(rule=symbols_syslibcheck, always=True, name='syslib checking') task.env.bld = bld bld.SET_BUILD_GROUP('syslibcheck') task = 
bld(rule=symbols_dupcheck, always=True, name='symbol duplicate checking') task.env.bld = bld if Options.options.WHYNEEDED: bld.SET_BUILD_GROUP('syslibcheck') task = bld(rule=symbols_whyneeded, always=True, name='check why a dependency is needed') task.env.bld = bld Build.BuildContext.SYMBOL_CHECK = SYMBOL_CHECK def DUP_SYMBOL_CHECK(bld): if Options.options.DUP_SYMBOLCHECK and bld.env.DEVELOPER: '''check for duplicate symbols''' bld.SET_BUILD_GROUP('syslibcheck') task = bld(rule=symbols_dupcheck_fatal, always=True, name='symbol duplicate checking') task.env.bld = bld Build.BuildContext.DUP_SYMBOL_CHECK = DUP_SYMBOL_CHECK ntdb-1.0/buildtools/wafsamba/tests/000077500000000000000000000000001224151530700174045ustar00rootroot00000000000000ntdb-1.0/buildtools/wafsamba/tests/__init__.py000066400000000000000000000022401224151530700215130ustar00rootroot00000000000000# Copyright (C) 2012 Jelmer Vernooij # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
# You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """Tests for wafsamba.""" from unittest import ( TestCase, TestLoader, ) def test_suite(): names = [ 'abi', 'bundled', 'utils', ] module_names = ['wafsamba.tests.test_' + name for name in names] loader = TestLoader() result = loader.suiteClass() suite = loader.loadTestsFromNames(module_names) result.addTests(suite) return result ntdb-1.0/buildtools/wafsamba/tests/test_abi.py000066400000000000000000000101371224151530700215520ustar00rootroot00000000000000# Copyright (C) 2012 Jelmer Vernooij # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
# You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA from wafsamba.tests import TestCase from wafsamba.samba_abi import ( abi_write_vscript, normalise_signature, ) from cStringIO import StringIO class NormaliseSignatureTests(TestCase): def test_function_simple(self): self.assertEquals("int (const struct GUID *, const struct GUID *)", normalise_signature("$2 = {int (const struct GUID *, const struct GUID *)} 0xe871 ")) def test_maps_Bool(self): # Some types have different internal names self.assertEquals("bool (const struct GUID *)", normalise_signature("$1 = {_Bool (const struct GUID *)} 0xe75b ")) def test_function_keep(self): self.assertEquals( "enum ndr_err_code (struct ndr_push *, int, const union winreg_Data *)", normalise_signature("enum ndr_err_code (struct ndr_push *, int, const union winreg_Data *)")) def test_struct_constant(self): self.assertEquals( 'uuid = {time_low = 0, time_mid = 0, time_hi_and_version = 0, clock_seq = "\\000", node = "\\000\\000\\000\\000\\000"}, if_version = 0', normalise_signature('$239 = {uuid = {time_low = 0, time_mid = 0, time_hi_and_version = 0, clock_seq = "\\000", node = "\\000\\000\\000\\000\\000"}, if_version = 0}')) def test_incomplete_sequence(self): # Newer versions of gdb insert these incomplete sequence elements self.assertEquals( 'uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2', normalise_signature('$244 = {uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237", , node = "\\b\\000+\\020H`"}, if_version = 2}')) self.assertEquals( 'uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2', normalise_signature('$244 = {uuid = {time_low = 
2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2}')) class WriteVscriptTests(TestCase): def test_one(self): f = StringIO() abi_write_vscript(f, "MYLIB", "1.0", [], { "old": "1.0", "new": "1.0"}, ["*"]) self.assertEquals(f.getvalue(), """\ 1.0 { \tglobal: \t\t*; }; """) def test_simple(self): # No restrictions. f = StringIO() abi_write_vscript(f, "MYLIB", "1.0", ["0.1"], { "old": "0.1", "new": "1.0"}, ["*"]) self.assertEquals(f.getvalue(), """\ MYLIB_0.1 { \tglobal: \t\told; }; 1.0 { \tglobal: \t\t*; }; """) def test_exclude(self): f = StringIO() abi_write_vscript(f, "MYLIB", "1.0", [], { "exc_old": "0.1", "old": "0.1", "new": "1.0"}, ["!exc_*"]) self.assertEquals(f.getvalue(), """\ 1.0 { \tglobal: \t\t*; \tlocal: \t\texc_*; }; """) def test_excludes_and_includes(self): f = StringIO() abi_write_vscript(f, "MYLIB", "1.0", [], { "pub_foo": "1.0", "exc_bar": "1.0", "other": "1.0" }, ["pub_*", "!exc_*"]) self.assertEquals(f.getvalue(), """\ 1.0 { \tglobal: \t\tpub_*; \tlocal: \t\texc_*; \t\t*; }; """) ntdb-1.0/buildtools/wafsamba/tests/test_bundled.py000066400000000000000000000017641224151530700224420ustar00rootroot00000000000000# Copyright (C) 2012 Jelmer Vernooij # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
# You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA from wafsamba.tests import TestCase from wafsamba.samba_bundled import ( tuplize_version, ) class TuplizeVersionTests(TestCase): def test_simple(self): self.assertEquals((1, 2, 10), tuplize_version("1.2.10")) ntdb-1.0/buildtools/wafsamba/tests/test_utils.py000066400000000000000000000047341224151530700221650ustar00rootroot00000000000000# Copyright (C) 2012 Jelmer Vernooij # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
# You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA from wafsamba.tests import TestCase from wafsamba.samba_utils import ( TO_LIST, dict_concat, subst_vars_error, unique_list, ) class ToListTests(TestCase): def test_none(self): self.assertEquals([], TO_LIST(None)) def test_already_list(self): self.assertEquals(["foo", "bar", 1], TO_LIST(["foo", "bar", 1])) def test_default_delimiter(self): self.assertEquals(["foo", "bar"], TO_LIST("foo bar")) self.assertEquals(["foo", "bar"], TO_LIST(" foo bar ")) self.assertEquals(["foo ", "bar"], TO_LIST(" \"foo \" bar ")) def test_delimiter(self): self.assertEquals(["foo", "bar"], TO_LIST("foo,bar", ",")) self.assertEquals([" foo", "bar "], TO_LIST(" foo,bar ", ",")) self.assertEquals([" \" foo\"", " bar "], TO_LIST(" \" foo\", bar ", ",")) class UniqueListTests(TestCase): def test_unique_list(self): self.assertEquals(["foo", "bar"], unique_list(["foo", "bar", "foo"])) class SubstVarsErrorTests(TestCase): def test_valid(self): self.assertEquals("", subst_vars_error("", {})) self.assertEquals("FOO bar", subst_vars_error("${F} bar", {"F": "FOO"})) def test_invalid(self): self.assertRaises(KeyError, subst_vars_error, "${F}", {}) class DictConcatTests(TestCase): def test_empty(self): ret = {} dict_concat(ret, {}) self.assertEquals({}, ret) def test_same(self): ret = {"foo": "bar"} dict_concat(ret, {"foo": "bla"}) self.assertEquals({"foo": "bar"}, ret) def test_simple(self): ret = {"foo": "bar"} dict_concat(ret, {"blie": "bla"}) self.assertEquals({"foo": "bar", "blie": "bla"}, ret) ntdb-1.0/buildtools/wafsamba/tru64cc.py000066400000000000000000000036331224151530700201130ustar00rootroot00000000000000 # compiler definition for tru64/OSF1 cc compiler # based on suncc.py from waf import os, optparse import Utils, Options, Configure import ccroot, ar from Configure import conftest 
from compiler_cc import c_compiler c_compiler['osf1V'] = ['gcc', 'tru64cc'] @conftest def find_tru64cc(conf): v = conf.env cc = None if v['CC']: cc = v['CC'] elif 'CC' in conf.environ: cc = conf.environ['CC'] if not cc: cc = conf.find_program('cc', var='CC') if not cc: conf.fatal('tru64cc was not found') cc = conf.cmd_to_list(cc) try: if not Utils.cmd_output(cc + ['-V']): conf.fatal('tru64cc %r was not found' % cc) except ValueError: conf.fatal('tru64cc -V could not be executed') v['CC'] = cc v['CC_NAME'] = 'tru64' @conftest def tru64cc_common_flags(conf): v = conf.env v['CC_SRC_F'] = '' v['CC_TGT_F'] = ['-c', '-o', ''] v['CPPPATH_ST'] = '-I%s' # template for adding include paths # linker if not v['LINK_CC']: v['LINK_CC'] = v['CC'] v['CCLNK_SRC_F'] = '' v['CCLNK_TGT_F'] = ['-o', ''] v['LIB_ST'] = '-l%s' # template for adding libs v['LIBPATH_ST'] = '-L%s' # template for adding libpaths v['STATICLIB_ST'] = '-l%s' v['STATICLIBPATH_ST'] = '-L%s' v['CCDEFINES_ST'] = '-D%s' # v['SONAME_ST'] = '-Wl,-h -Wl,%s' # v['SHLIB_MARKER'] = '-Bdynamic' # v['STATICLIB_MARKER'] = '-Bstatic' # program v['program_PATTERN'] = '%s' # shared library # v['shlib_CCFLAGS'] = ['-Kpic', '-DPIC'] v['shlib_LINKFLAGS'] = ['-shared'] v['shlib_PATTERN'] = 'lib%s.so' # static lib # v['staticlib_LINKFLAGS'] = ['-Bstatic'] # v['staticlib_PATTERN'] = 'lib%s.a' detect = ''' find_tru64cc find_cpp find_ar tru64cc_common_flags cc_load_tools cc_add_flags link_add_flags ''' ntdb-1.0/buildtools/wafsamba/wafsamba.py000066400000000000000000000754061224151530700204110ustar00rootroot00000000000000# a waf tool to add autoconf-like macros to the configure section # and for SAMBA_ macros for building libraries, binaries etc import Build, os, sys, Options, Task, Utils, cc, TaskGen, fnmatch, re, shutil, Logs, Constants from Configure import conf from Logs import debug from samba_utils import SUBST_VARS_RECURSIVE TaskGen.task_gen.apply_verif = Utils.nada # bring in the other samba modules from samba_optimisation import 
* from samba_utils import * from samba_version import * from samba_autoconf import * from samba_patterns import * from samba_pidl import * from samba_autoproto import * from samba_python import * from samba_deps import * from samba_bundled import * import samba_install import samba_conftests import samba_abi import samba_headers import tru64cc import irixcc import hpuxcc import generic_cc import samba_dist import samba_wildcard import stale_files import symbols import pkgconfig import configure_file # some systems have broken threading in python if os.environ.get('WAF_NOTHREADS') == '1': import nothreads LIB_PATH="shared" os.environ['PYTHONUNBUFFERED'] = '1' if Constants.HEXVERSION < 0x105019: Logs.error(''' Please use the version of waf that comes with Samba, not a system installed version. See http://wiki.samba.org/index.php/Waf for details. Alternatively, please run ./configure and make as usual. That will call the right version of waf.''') sys.exit(1) @conf def SAMBA_BUILD_ENV(conf): '''create the samba build environment''' conf.env.BUILD_DIRECTORY = conf.blddir mkdir_p(os.path.join(conf.blddir, LIB_PATH)) mkdir_p(os.path.join(conf.blddir, LIB_PATH, "private")) mkdir_p(os.path.join(conf.blddir, "modules")) mkdir_p(os.path.join(conf.blddir, 'python/samba/dcerpc')) # this allows all of the bin/shared and bin/python targets # to be expressed in terms of build directory paths mkdir_p(os.path.join(conf.blddir, 'default')) for (source, target) in [('shared', 'shared'), ('modules', 'modules'), ('python', 'python_modules')]: link_target = os.path.join(conf.blddir, 'default/' + target) if not os.path.lexists(link_target): os.symlink('../' + source, link_target) # get perl to put the blib files in the build directory blib_bld = os.path.join(conf.blddir, 'default/pidl/blib') blib_src = os.path.join(conf.srcdir, 'pidl/blib') mkdir_p(blib_bld + '/man1') mkdir_p(blib_bld + '/man3') if os.path.islink(blib_src): os.unlink(blib_src) elif os.path.exists(blib_src): 
shutil.rmtree(blib_src) def ADD_INIT_FUNCTION(bld, subsystem, target, init_function): '''add an init_function to the list for a subsystem''' if init_function is None: return bld.ASSERT(subsystem is not None, "You must specify a subsystem for init_function '%s'" % init_function) cache = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') if not subsystem in cache: cache[subsystem] = [] cache[subsystem].append( { 'TARGET':target, 'INIT_FUNCTION':init_function } ) Build.BuildContext.ADD_INIT_FUNCTION = ADD_INIT_FUNCTION ################################################################# def SAMBA_LIBRARY(bld, libname, source, deps='', public_deps='', includes='', public_headers=None, public_headers_install=True, header_path=None, pc_files=None, vnum=None, soname=None, cflags='', ldflags='', external_library=False, realname=None, autoproto=None, autoproto_extra_source='', group='main', depends_on='', local_include=True, global_include=True, vars=None, subdir=None, install_path=None, install=True, pyembed=False, pyext=False, target_type='LIBRARY', bundled_extension=True, link_name=None, abi_directory=None, abi_match=None, hide_symbols=False, manpages=None, private_library=False, grouping_library=False, allow_undefined_symbols=False, enabled=True): '''define a Samba library''' if LIB_MUST_BE_PRIVATE(bld, libname): private_library=True if not enabled: SET_TARGET_TYPE(bld, libname, 'DISABLED') return source = bld.EXPAND_VARIABLES(source, vars=vars) if subdir: source = bld.SUBDIR(subdir, source) # remember empty libraries, so we can strip the dependencies if ((source == '') or (source == [])) and deps == '' and public_deps == '': SET_TARGET_TYPE(bld, libname, 'EMPTY') return if BUILTIN_LIBRARY(bld, libname): obj_target = libname else: obj_target = libname + '.objlist' if group == 'libraries': subsystem_group = 'main' else: subsystem_group = group # first create a target for building the object files for this library # by separating in this way, we avoid recompiling the C files # separately 
for the install library and the build library bld.SAMBA_SUBSYSTEM(obj_target, source = source, deps = deps, public_deps = public_deps, includes = includes, public_headers = public_headers, public_headers_install = public_headers_install, header_path = header_path, cflags = cflags, group = subsystem_group, autoproto = autoproto, autoproto_extra_source=autoproto_extra_source, depends_on = depends_on, hide_symbols = hide_symbols, pyembed = pyembed, pyext = pyext, local_include = local_include, global_include = global_include) if BUILTIN_LIBRARY(bld, libname): return if not SET_TARGET_TYPE(bld, libname, target_type): return # the library itself will depend on that object target deps += ' ' + public_deps deps = TO_LIST(deps) deps.append(obj_target) realname = bld.map_shlib_extension(realname, python=(target_type=='PYTHON')) link_name = bld.map_shlib_extension(link_name, python=(target_type=='PYTHON')) # we don't want any public libraries without version numbers if (not private_library and target_type != 'PYTHON' and not realname): if vnum is None and soname is None: raise Utils.WafError("public library '%s' must have a vnum" % libname) if pc_files is None: raise Utils.WafError("public library '%s' must have pkg-config file" % libname) if public_headers is None: raise Utils.WafError("public library '%s' must have header files" % libname) if target_type == 'PYTHON' or realname or not private_library: bundled_name = libname.replace('_', '-') else: bundled_name = PRIVATE_NAME(bld, libname, bundled_extension, private_library) ldflags = TO_LIST(ldflags) features = 'cc cshlib symlink_lib install_lib' if pyext: features += ' pyext' if pyembed: features += ' pyembed' if abi_directory: features += ' abi_check' vscript = None if bld.env.HAVE_LD_VERSION_SCRIPT: if private_library: version = "%s_%s" % (Utils.g_module.APPNAME, Utils.g_module.VERSION) elif vnum: version = "%s_%s" % (libname, vnum) else: version = None if version: vscript = "%s.vscript" % libname 
bld.ABI_VSCRIPT(libname, abi_directory, version, vscript, abi_match) fullname = apply_pattern(bundled_name, bld.env.shlib_PATTERN) fullpath = bld.path.find_or_declare(fullname) vscriptpath = bld.path.find_or_declare(vscript) if not fullpath: raise Utils.WafError("unable to find fullpath for %s" % fullname) if not vscriptpath: raise Utils.WafError("unable to find vscript path for %s" % vscript) bld.add_manual_dependency(fullpath, vscriptpath) if Options.is_install: # also make the .inst file depend on the vscript instname = apply_pattern(bundled_name + '.inst', bld.env.shlib_PATTERN) bld.add_manual_dependency(bld.path.find_or_declare(instname), bld.path.find_or_declare(vscript)) vscript = os.path.join(bld.path.abspath(bld.env), vscript) bld.SET_BUILD_GROUP(group) t = bld( features = features, source = [], target = bundled_name, depends_on = depends_on, samba_ldflags = ldflags, samba_deps = deps, samba_includes = includes, version_script = vscript, local_include = local_include, global_include = global_include, vnum = vnum, soname = soname, install_path = None, samba_inst_path = install_path, name = libname, samba_realname = realname, samba_install = install, abi_directory = "%s/%s" % (bld.path.abspath(), abi_directory), abi_match = abi_match, private_library = private_library, grouping_library=grouping_library, allow_undefined_symbols=allow_undefined_symbols ) if realname and not link_name: link_name = 'shared/%s' % realname if link_name: t.link_name = link_name if pc_files is not None and not private_library: bld.PKG_CONFIG_FILES(pc_files, vnum=vnum) if (manpages is not None and 'XSLTPROC_MANPAGES' in bld.env and bld.env['XSLTPROC_MANPAGES']): bld.MANPAGES(manpages, install) Build.BuildContext.SAMBA_LIBRARY = SAMBA_LIBRARY ################################################################# def SAMBA_BINARY(bld, binname, source, deps='', includes='', public_headers=None, header_path=None, modules=None, ldflags=None, cflags='', autoproto=None, use_hostcc=False, 
use_global_deps=True, compiler=None, group='main', manpages=None, local_include=True, global_include=True, subsystem_name=None, pyembed=False, vars=None, subdir=None, install=True, install_path=None, enabled=True): '''define a Samba binary''' if not enabled: SET_TARGET_TYPE(bld, binname, 'DISABLED') return if not SET_TARGET_TYPE(bld, binname, 'BINARY'): return features = 'cc cprogram symlink_bin install_bin' if pyembed: features += ' pyembed' obj_target = binname + '.objlist' source = bld.EXPAND_VARIABLES(source, vars=vars) if subdir: source = bld.SUBDIR(subdir, source) source = unique_list(TO_LIST(source)) if group == 'binaries': subsystem_group = 'main' else: subsystem_group = group # only specify PIE flags for binaries pie_cflags = cflags pie_ldflags = TO_LIST(ldflags) if bld.env['ENABLE_PIE'] == True: pie_cflags += ' -fPIE' pie_ldflags.extend(TO_LIST('-pie')) if bld.env['ENABLE_RELRO'] == True: pie_ldflags.extend(TO_LIST('-Wl,-z,relro,-z,now')) # first create a target for building the object files for this binary # by separating in this way, we avoid recompiling the C files # separately for the install binary and the build binary bld.SAMBA_SUBSYSTEM(obj_target, source = source, deps = deps, includes = includes, cflags = pie_cflags, group = subsystem_group, autoproto = autoproto, subsystem_name = subsystem_name, local_include = local_include, global_include = global_include, use_hostcc = use_hostcc, pyext = pyembed, use_global_deps= use_global_deps) bld.SET_BUILD_GROUP(group) # the binary itself will depend on that object target deps = TO_LIST(deps) deps.append(obj_target) t = bld( features = features, source = [], target = binname, samba_deps = deps, samba_includes = includes, local_include = local_include, global_include = global_include, samba_modules = modules, top = True, samba_subsystem= subsystem_name, install_path = None, samba_inst_path= install_path, samba_install = install, samba_ldflags = pie_ldflags ) if manpages is not None and 'XSLTPROC_MANPAGES' 
in bld.env and bld.env['XSLTPROC_MANPAGES']: bld.MANPAGES(manpages, install) Build.BuildContext.SAMBA_BINARY = SAMBA_BINARY ################################################################# def SAMBA_MODULE(bld, modname, source, deps='', includes='', subsystem=None, init_function=None, module_init_name='samba_init_module', autoproto=None, autoproto_extra_source='', cflags='', internal_module=True, local_include=True, global_include=True, vars=None, subdir=None, enabled=True, pyembed=False, manpages=None, allow_undefined_symbols=False ): '''define a Samba module.''' source = bld.EXPAND_VARIABLES(source, vars=vars) if subdir: source = bld.SUBDIR(subdir, source) if internal_module or BUILTIN_LIBRARY(bld, modname): # Do not create modules for disabled subsystems if subsystem and GET_TARGET_TYPE(bld, subsystem) == 'DISABLED': return bld.SAMBA_SUBSYSTEM(modname, source, deps=deps, includes=includes, autoproto=autoproto, autoproto_extra_source=autoproto_extra_source, cflags=cflags, local_include=local_include, global_include=global_include, enabled=enabled) bld.ADD_INIT_FUNCTION(subsystem, modname, init_function) return if not enabled: SET_TARGET_TYPE(bld, modname, 'DISABLED') return # Do not create modules for disabled subsystems if subsystem and GET_TARGET_TYPE(bld, subsystem) == 'DISABLED': return obj_target = modname + '.objlist' realname = modname if subsystem is not None: deps += ' ' + subsystem while realname.startswith("lib"+subsystem+"_"): realname = realname[len("lib"+subsystem+"_"):] while realname.startswith(subsystem+"_"): realname = realname[len(subsystem+"_"):] realname = bld.make_libname(realname) while realname.startswith("lib"): realname = realname[len("lib"):] build_link_name = "modules/%s/%s" % (subsystem, realname) if init_function: cflags += " -D%s=%s" % (init_function, module_init_name) bld.SAMBA_LIBRARY(modname, source, deps=deps, includes=includes, cflags=cflags, realname = realname, autoproto = autoproto, local_include=local_include, 
global_include=global_include, vars=vars, link_name=build_link_name, install_path="${MODULESDIR}/%s" % subsystem, pyembed=pyembed, manpages=manpages, allow_undefined_symbols=allow_undefined_symbols ) Build.BuildContext.SAMBA_MODULE = SAMBA_MODULE ################################################################# def SAMBA_SUBSYSTEM(bld, modname, source, deps='', public_deps='', includes='', public_headers=None, public_headers_install=True, header_path=None, cflags='', cflags_end=None, group='main', init_function_sentinel=None, autoproto=None, autoproto_extra_source='', depends_on='', local_include=True, local_include_first=True, global_include=True, subsystem_name=None, enabled=True, use_hostcc=False, use_global_deps=True, vars=None, subdir=None, hide_symbols=False, pyext=False, pyembed=False): '''define a Samba subsystem''' if not enabled: SET_TARGET_TYPE(bld, modname, 'DISABLED') return # remember empty subsystems, so we can strip the dependencies if ((source == '') or (source == [])) and deps == '' and public_deps == '': SET_TARGET_TYPE(bld, modname, 'EMPTY') return if not SET_TARGET_TYPE(bld, modname, 'SUBSYSTEM'): return source = bld.EXPAND_VARIABLES(source, vars=vars) if subdir: source = bld.SUBDIR(subdir, source) source = unique_list(TO_LIST(source)) deps += ' ' + public_deps bld.SET_BUILD_GROUP(group) features = 'cc' if pyext: features += ' pyext' if pyembed: features += ' pyembed' t = bld( features = features, source = source, target = modname, samba_cflags = CURRENT_CFLAGS(bld, modname, cflags, hide_symbols=hide_symbols), depends_on = depends_on, samba_deps = TO_LIST(deps), samba_includes = includes, local_include = local_include, local_include_first = local_include_first, global_include = global_include, samba_subsystem= subsystem_name, samba_use_hostcc = use_hostcc, samba_use_global_deps = use_global_deps, ) if cflags_end is not None: t.samba_cflags.extend(TO_LIST(cflags_end)) if autoproto is not None: bld.SAMBA_AUTOPROTO(autoproto, source + 
TO_LIST(autoproto_extra_source)) if public_headers is not None: bld.PUBLIC_HEADERS(public_headers, header_path=header_path, public_headers_install=public_headers_install) return t Build.BuildContext.SAMBA_SUBSYSTEM = SAMBA_SUBSYSTEM def SAMBA_GENERATOR(bld, name, rule, source='', target='', group='generators', enabled=True, public_headers=None, public_headers_install=True, header_path=None, vars=None, always=False): '''A generic source generator target''' if not SET_TARGET_TYPE(bld, name, 'GENERATOR'): return if not enabled: return dep_vars = [] if isinstance(vars, dict): dep_vars = vars.keys() elif isinstance(vars, list): dep_vars = vars bld.SET_BUILD_GROUP(group) t = bld( rule=rule, source=bld.EXPAND_VARIABLES(source, vars=vars), target=target, shell=isinstance(rule, str), on_results=True, before='cc', ext_out='.c', samba_type='GENERATOR', dep_vars = [rule] + dep_vars, name=name) if always: t.always = True if public_headers is not None: bld.PUBLIC_HEADERS(public_headers, header_path=header_path, public_headers_install=public_headers_install) return t Build.BuildContext.SAMBA_GENERATOR = SAMBA_GENERATOR @runonce def SETUP_BUILD_GROUPS(bld): '''setup build groups used to ensure that the different build phases happen consecutively''' bld.p_ln = bld.srcnode # we do want to see all targets! 
bld.env['USING_BUILD_GROUPS'] = True bld.add_group('setup') bld.add_group('build_compiler_source') bld.add_group('vscripts') bld.add_group('base_libraries') bld.add_group('generators') bld.add_group('compiler_prototypes') bld.add_group('compiler_libraries') bld.add_group('build_compilers') bld.add_group('build_source') bld.add_group('prototypes') bld.add_group('headers') bld.add_group('main') bld.add_group('symbolcheck') bld.add_group('syslibcheck') bld.add_group('final') Build.BuildContext.SETUP_BUILD_GROUPS = SETUP_BUILD_GROUPS def SET_BUILD_GROUP(bld, group): '''set the current build group''' if not 'USING_BUILD_GROUPS' in bld.env: return bld.set_group(group) Build.BuildContext.SET_BUILD_GROUP = SET_BUILD_GROUP @conf def ENABLE_TIMESTAMP_DEPENDENCIES(conf): """use timestamps instead of file contents for deps this currently doesn't work""" def h_file(filename): import stat st = os.stat(filename) if stat.S_ISDIR(st[stat.ST_MODE]): raise IOError('not a file') m = Utils.md5() m.update(str(st.st_mtime)) m.update(str(st.st_size)) m.update(filename) return m.digest() Utils.h_file = h_file def SAMBA_SCRIPT(bld, name, pattern, installdir, installname=None): '''used to copy scripts from the source tree into the build directory for use by selftest''' source = bld.path.ant_glob(pattern) bld.SET_BUILD_GROUP('build_source') for s in TO_LIST(source): iname = s if installname is not None: iname = installname target = os.path.join(installdir, iname) tgtdir = os.path.dirname(os.path.join(bld.srcnode.abspath(bld.env), '..', target)) mkdir_p(tgtdir) link_src = os.path.normpath(os.path.join(bld.curdir, s)) link_dst = os.path.join(tgtdir, os.path.basename(iname)) if os.path.islink(link_dst) and os.readlink(link_dst) == link_src: continue if os.path.exists(link_dst): os.unlink(link_dst) Logs.info("symlink: %s -> %s/%s" % (s, installdir, iname)) os.symlink(link_src, link_dst) Build.BuildContext.SAMBA_SCRIPT = SAMBA_SCRIPT def copy_and_fix_python_path(task): pattern='sys.path.insert(0, 
"bin/python")' if task.env["PYTHONARCHDIR"] in sys.path and task.env["PYTHONDIR"] in sys.path: replacement = "" elif task.env["PYTHONARCHDIR"] == task.env["PYTHONDIR"]: replacement="""sys.path.insert(0, "%s")""" % task.env["PYTHONDIR"] else: replacement="""sys.path.insert(0, "%s") sys.path.insert(1, "%s")""" % (task.env["PYTHONARCHDIR"], task.env["PYTHONDIR"]) shebang = None if task.env["PYTHON"][0] == "/": replacement_shebang = "#!%s\n" % task.env["PYTHON"] else: replacement_shebang = "#!/usr/bin/env %s\n" % task.env["PYTHON"] installed_location=task.outputs[0].bldpath(task.env) source_file = open(task.inputs[0].srcpath(task.env)) installed_file = open(installed_location, 'w') lineno = 0 for line in source_file: newline = line if lineno == 0 and task.env["PYTHON_SPECIFIED"] == True and line[:2] == "#!": newline = replacement_shebang elif pattern in line: newline = line.replace(pattern, replacement) installed_file.write(newline) lineno = lineno + 1 installed_file.close() os.chmod(installed_location, 0755) return 0 def install_file(bld, destdir, file, chmod=MODE_644, flat=False, python_fixup=False, destname=None, base_name=None): '''install a file''' destdir = bld.EXPAND_VARIABLES(destdir) if not destname: destname = file if flat: destname = os.path.basename(destname) dest = os.path.join(destdir, destname) if python_fixup: # fixup the python path it will use to find Samba modules inst_file = file + '.inst' bld.SAMBA_GENERATOR('python_%s' % destname, rule=copy_and_fix_python_path, source=file, target=inst_file) bld.add_manual_dependency(bld.path.find_or_declare(inst_file), bld.env["PYTHONARCHDIR"]) bld.add_manual_dependency(bld.path.find_or_declare(inst_file), bld.env["PYTHONDIR"]) bld.add_manual_dependency(bld.path.find_or_declare(inst_file), str(bld.env["PYTHON_SPECIFIED"])) bld.add_manual_dependency(bld.path.find_or_declare(inst_file), bld.env["PYTHON"]) file = inst_file if base_name: file = os.path.join(base_name, file) bld.install_as(dest, file, chmod=chmod) def 
INSTALL_FILES(bld, destdir, files, chmod=MODE_644, flat=False, python_fixup=False, destname=None, base_name=None): '''install a set of files''' for f in TO_LIST(files): install_file(bld, destdir, f, chmod=chmod, flat=flat, python_fixup=python_fixup, destname=destname, base_name=base_name) Build.BuildContext.INSTALL_FILES = INSTALL_FILES def INSTALL_WILDCARD(bld, destdir, pattern, chmod=MODE_644, flat=False, python_fixup=False, exclude=None, trim_path=None): '''install a set of files matching a wildcard pattern''' files=TO_LIST(bld.path.ant_glob(pattern)) if trim_path: files2 = [] for f in files: files2.append(os_path_relpath(f, trim_path)) files = files2 if exclude: for f in files[:]: if fnmatch.fnmatch(f, exclude): files.remove(f) INSTALL_FILES(bld, destdir, files, chmod=chmod, flat=flat, python_fixup=python_fixup, base_name=trim_path) Build.BuildContext.INSTALL_WILDCARD = INSTALL_WILDCARD def INSTALL_DIRS(bld, destdir, dirs): '''install a set of directories''' destdir = bld.EXPAND_VARIABLES(destdir) dirs = bld.EXPAND_VARIABLES(dirs) for d in TO_LIST(dirs): bld.install_dir(os.path.join(destdir, d)) Build.BuildContext.INSTALL_DIRS = INSTALL_DIRS def MANPAGES(bld, manpages, install): '''build and install manual pages''' bld.env.MAN_XSL = 'http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl' for m in manpages.split(): source = m + '.xml' bld.SAMBA_GENERATOR(m, source=source, target=m, group='final', rule='${XSLTPROC} --xinclude -o ${TGT} --nonet ${MAN_XSL} ${SRC}' ) if install: bld.INSTALL_FILES('${MANDIR}/man%s' % m[-1], m, flat=True) Build.BuildContext.MANPAGES = MANPAGES def SAMBAMANPAGES(bld, manpages): '''build and install manual pages''' bld.env.SAMBA_EXPAND_XSL = bld.srcnode.abspath() + '/docs-xml/xslt/expand-sambadoc.xsl' bld.env.SAMBA_MAN_XSL = bld.srcnode.abspath() + '/docs-xml/xslt/man.xsl' bld.env.SAMBA_CATALOGS = 'file:///etc/xml/catalog file:///usr/local/share/xml/catalog file://' + bld.srcnode.abspath() + 
'/bin/default/docs-xml/build/catalog.xml' for m in manpages.split(): source = m + '.xml' bld.SAMBA_GENERATOR(m, source=source, target=m, group='final', rule='''XML_CATALOG_FILES="${SAMBA_CATALOGS}" export XML_CATALOG_FILES ${XSLTPROC} --xinclude --stringparam noreference 0 -o ${TGT}.xml --nonet ${SAMBA_EXPAND_XSL} ${SRC} ${XSLTPROC} --nonet -o ${TGT} ${SAMBA_MAN_XSL} ${TGT}.xml''' ) bld.INSTALL_FILES('${MANDIR}/man%s' % m[-1], m, flat=True) Build.BuildContext.SAMBAMANPAGES = SAMBAMANPAGES ############################################################# # give a nicer display when building different types of files def progress_display(self, msg, fname): col1 = Logs.colors(self.color) col2 = Logs.colors.NORMAL total = self.position[1] n = len(str(total)) fs = '[%%%dd/%%%dd] %s %%s%%s%%s\n' % (n, n, msg) return fs % (self.position[0], self.position[1], col1, fname, col2) def link_display(self): if Options.options.progress_bar != 0: return Task.Task.old_display(self) fname = self.outputs[0].bldpath(self.env) return progress_display(self, 'Linking', fname) Task.TaskBase.classes['cc_link'].display = link_display def samba_display(self): if Options.options.progress_bar != 0: return Task.Task.old_display(self) targets = LOCAL_CACHE(self, 'TARGET_TYPE') if self.name in targets: target_type = targets[self.name] type_map = { 'GENERATOR' : 'Generating', 'PROTOTYPE' : 'Generating' } if target_type in type_map: return progress_display(self, type_map[target_type], self.name) if len(self.inputs) == 0: return Task.Task.old_display(self) fname = self.inputs[0].bldpath(self.env) if fname[0:3] == '../': fname = fname[3:] ext_loc = fname.rfind('.') if ext_loc == -1: return Task.Task.old_display(self) ext = fname[ext_loc:] ext_map = { '.idl' : 'Compiling IDL', '.et' : 'Compiling ERRTABLE', '.asn1': 'Compiling ASN1', '.c' : 'Compiling' } if ext in ext_map: return progress_display(self, ext_map[ext], fname) return Task.Task.old_display(self) Task.TaskBase.classes['Task'].old_display = 
Task.TaskBase.classes['Task'].display Task.TaskBase.classes['Task'].display = samba_display @after('apply_link') @feature('cshlib') def apply_bundle_remove_dynamiclib_patch(self): if self.env['MACBUNDLE'] or getattr(self,'mac_bundle',False): if not getattr(self,'vnum',None): try: self.env['LINKFLAGS'].remove('-dynamiclib') self.env['LINKFLAGS'].remove('-single_module') except ValueError: pass ntdb-1.0/buildtools/wafsamba/wscript000077500000000000000000000461731224151530700176760ustar00rootroot00000000000000#!/usr/bin/env python # this is a base set of waf rules that everything else pulls in first import sys, wafsamba, Configure, Logs import Options, os, preproc from samba_utils import * from optparse import SUPPRESS_HELP # this forces configure to be re-run if any of the configure # sections of the build scripts change. We have to check # for this in sys.argv as options have not yet been parsed when # we need to set this. This is off by default until some issues # are resolved related to WAFCACHE. It will need a lot of testing # before it is enabled by default. if '--enable-auto-reconfigure' in sys.argv: Configure.autoconfig = True def set_options(opt): opt.tool_options('compiler_cc') opt.tool_options('gnu_dirs') gr = opt.option_group('library handling options') gr.add_option('--bundled-libraries', help=("comma separated list of bundled libraries. May include !LIBNAME to disable bundling a library. Can be 'NONE' or 'ALL' [auto]"), action="store", dest='BUNDLED_LIBS', default='') gr.add_option('--private-libraries', help=("comma separated list of normally public libraries to build instead as private libraries. May include !LIBNAME to disable making a library private. 
Can be 'NONE' or 'ALL' [auto]"), action="store", dest='PRIVATE_LIBS', default='') extension_default = Options.options['PRIVATE_EXTENSION_DEFAULT'] gr.add_option('--private-library-extension', help=("name extension for private libraries [%s]" % extension_default), action="store", dest='PRIVATE_EXTENSION', default=extension_default) extension_exception = Options.options['PRIVATE_EXTENSION_EXCEPTION'] gr.add_option('--private-extension-exception', help=("comma separated list of libraries to not apply extension to [%s]" % extension_exception), action="store", dest='PRIVATE_EXTENSION_EXCEPTION', default=extension_exception) builtin_defauilt = Options.options['BUILTIN_LIBRARIES_DEFAULT'] gr.add_option('--builtin-libraries', help=("command separated list of libraries to build directly into binaries [%s]" % builtin_defauilt), action="store", dest='BUILTIN_LIBRARIES', default=builtin_defauilt) gr.add_option('--minimum-library-version', help=("list of minimum system library versions (LIBNAME1:version,LIBNAME2:version)"), action="store", dest='MINIMUM_LIBRARY_VERSION', default='') gr.add_option('--disable-rpath', help=("Disable use of rpath for build binaries"), action="store_true", dest='disable_rpath_build', default=False) gr.add_option('--disable-rpath-install', help=("Disable use of rpath for library path in installed files"), action="store_true", dest='disable_rpath_install', default=False) gr.add_option('--disable-rpath-private-install', help=("Disable use of rpath for private library path in installed files"), action="store_true", dest='disable_rpath_private_install', default=False) gr.add_option('--nonshared-binary', help=("Disable use of shared libs for the listed binaries"), action="store", dest='NONSHARED_BINARIES', default='') gr.add_option('--disable-symbol-versions', help=("Disable use of the --version-script linker option"), action="store_true", dest='disable_symbol_versions', default=False) opt.add_option('--with-modulesdir', help=("modules directory 
[PREFIX/modules]"), action="store", dest='MODULESDIR', default='${PREFIX}/modules') opt.add_option('--with-privatelibdir', help=("private library directory [PREFIX/lib/%s]" % Utils.g_module.APPNAME), action="store", dest='PRIVATELIBDIR', default=None) opt.add_option('--with-libiconv', help='additional directory to search for libiconv', action='store', dest='iconv_open', default='/usr/local', match = ['Checking for library iconv', 'Checking for iconv_open', 'Checking for header iconv.h']) opt.add_option('--with-gettext', help='additional directory to search for gettext', action='store', dest='gettext_location', default='/usr/local', match = ['Checking for library intl', 'Checking for header libintl.h']) opt.add_option('--without-gettext', help=("Disable use of gettext"), action="store_true", dest='disable_gettext', default=False) gr = opt.option_group('developer options') gr.add_option('-C', help='enable configure cacheing', action='store_true', dest='enable_configure_cache') gr.add_option('--enable-auto-reconfigure', help='enable automatic reconfigure on build', action='store_true', dest='enable_auto_reconfigure') gr.add_option('--enable-debug', help=("Turn on debugging symbols"), action="store_true", dest='debug', default=False) gr.add_option('--enable-developer', help=("Turn on developer warnings and debugging"), action="store_true", dest='developer', default=False) gr.add_option('--picky-developer', help=("Treat all warnings as errors (enable -Werror)"), action="store_true", dest='picky_developer', default=False) gr.add_option('--fatal-errors', help=("Stop compilation on first error (enable -Wfatal-errors)"), action="store_true", dest='fatal_errors', default=False) gr.add_option('--enable-gccdeps', help=("Enable use of gcc -MD dependency module"), action="store_true", dest='enable_gccdeps', default=True) gr.add_option('--timestamp-dependencies', help=("use file timestamps instead of content for build dependencies (BROKEN)"), action="store_true", 
dest='timestamp_dependencies', default=False) gr.add_option('--pedantic', help=("Enable even more compiler warnings"), action='store_true', dest='pedantic', default=False) gr.add_option('--git-local-changes', help=("mark version with + if local git changes"), action='store_true', dest='GIT_LOCAL_CHANGES', default=False) gr.add_option('--abi-check', help=("Check ABI signatures for libraries"), action='store_true', dest='ABI_CHECK', default=False) gr.add_option('--abi-check-disable', help=("Disable ABI checking (used with --enable-developer)"), action='store_true', dest='ABI_CHECK_DISABLE', default=False) gr.add_option('--abi-update', help=("Update ABI signature files for libraries"), action='store_true', dest='ABI_UPDATE', default=False) gr.add_option('--show-deps', help=("Show dependency tree for the given target"), dest='SHOWDEPS', default='') gr.add_option('--symbol-check', help=("check symbols in object files against project rules"), action='store_true', dest='SYMBOLCHECK', default=False) gr.add_option('--dup-symbol-check', help=("check for duplicate symbols in object files and system libs (must be configured with --enable-developer)"), action='store_true', dest='DUP_SYMBOLCHECK', default=False) gr.add_option('--why-needed', help=("TARGET:DEPENDENCY check why TARGET needs DEPENDENCY"), action='store', type='str', dest='WHYNEEDED', default=None) gr.add_option('--show-duplicates', help=("Show objects which are included in multiple binaries or libraries"), action='store_true', dest='SHOW_DUPLICATES', default=False) gr = opt.add_option_group('cross compilation options') gr.add_option('--cross-compile', help=("configure for cross-compilation"), action='store_true', dest='CROSS_COMPILE', default=False) gr.add_option('--cross-execute', help=("command prefix to use for cross-execution in configure"), action='store', dest='CROSS_EXECUTE', default='') gr.add_option('--cross-answers', help=("answers to cross-compilation configuration (auto modified)"), action='store', 
dest='CROSS_ANSWERS', default='') gr.add_option('--hostcc', help=("set host compiler when cross compiling"), action='store', dest='HOSTCC', default=False) # we use SUPPRESS_HELP for these, as they are ignored, and are there only # to allow existing RPM spec files to work opt.add_option('--build', help=SUPPRESS_HELP, action='store', dest='AUTOCONF_BUILD', default='') opt.add_option('--host', help=SUPPRESS_HELP, action='store', dest='AUTOCONF_HOST', default='') opt.add_option('--target', help=SUPPRESS_HELP, action='store', dest='AUTOCONF_TARGET', default='') opt.add_option('--program-prefix', help=SUPPRESS_HELP, action='store', dest='AUTOCONF_PROGRAM_PREFIX', default='') opt.add_option('--disable-dependency-tracking', help=SUPPRESS_HELP, action='store_true', dest='AUTOCONF_DISABLE_DEPENDENCY_TRACKING', default=False) opt.add_option('--disable-silent-rules', help=SUPPRESS_HELP, action='store_true', dest='AUTOCONF_DISABLE_SILENT_RULES', default=False) gr = opt.option_group('dist options') gr.add_option('--sign-release', help='sign the release tarball created by waf dist', action='store_true', dest='SIGN_RELEASE') gr.add_option('--tag', help='tag release in git at the same time', type='string', action='store', dest='TAG_RELEASE') @wafsamba.runonce def configure(conf): conf.env.hlist = [] conf.env.srcdir = conf.srcdir if Options.options.timestamp_dependencies: conf.ENABLE_TIMESTAMP_DEPENDENCIES() conf.SETUP_CONFIGURE_CACHE(Options.options.enable_configure_cache) # load our local waf extensions conf.check_tool('gnu_dirs') conf.check_tool('wafsamba') conf.CHECK_CC_ENV() conf.check_tool('compiler_cc') # we need git for 'waf dist' conf.find_program('git', var='GIT') # older gcc versions (< 4.4) does not work with gccdeps, so we have to see if the .d file is generated if Options.options.enable_gccdeps: from TaskGen import feature, after @feature('testd') @after('apply_core') def check_d(self): tsk = self.compiled_tasks[0] tsk.outputs.append(tsk.outputs[0].change_ext('.d')) 
import Task cc = Task.TaskBase.classes['cc'] oldmeth = cc.run cc.run = Task.compile_fun_noshell('cc', '${CC} ${CCFLAGS} ${CPPFLAGS} ${_CCINCFLAGS} ${_CCDEFFLAGS} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT[0].abspath(env)}')[0] try: try: conf.check(features='cc testd', fragment='int main() {return 0;}\n', ccflags=['-MD'], mandatory=True, msg='Check for -MD') except: pass else: conf.check_tool('gccdeps', tooldir=conf.srcdir + "/buildtools/wafsamba") finally: cc.run = oldmeth # make the install paths available in environment conf.env.LIBDIR = Options.options.LIBDIR or '${PREFIX}/lib' conf.env.BINDIR = Options.options.BINDIR or '${PREFIX}/bin' conf.env.SBINDIR = Options.options.SBINDIR or '${PREFIX}/sbin' conf.env.MODULESDIR = Options.options.MODULESDIR conf.env.PRIVATELIBDIR = Options.options.PRIVATELIBDIR conf.env.BUNDLED_LIBS = Options.options.BUNDLED_LIBS.split(',') conf.env.PRIVATE_LIBS = Options.options.PRIVATE_LIBS.split(',') conf.env.BUILTIN_LIBRARIES = Options.options.BUILTIN_LIBRARIES.split(',') conf.env.NONSHARED_BINARIES = Options.options.NONSHARED_BINARIES.split(',') conf.env.PRIVATE_EXTENSION = Options.options.PRIVATE_EXTENSION conf.env.PRIVATE_EXTENSION_EXCEPTION = Options.options.PRIVATE_EXTENSION_EXCEPTION.split(',') conf.env.CROSS_COMPILE = Options.options.CROSS_COMPILE conf.env.CROSS_EXECUTE = Options.options.CROSS_EXECUTE conf.env.CROSS_ANSWERS = Options.options.CROSS_ANSWERS conf.env.HOSTCC = Options.options.HOSTCC conf.env.AUTOCONF_BUILD = Options.options.AUTOCONF_BUILD conf.env.AUTOCONF_HOST = Options.options.AUTOCONF_HOST conf.env.AUTOCONF_PROGRAM_PREFIX = Options.options.AUTOCONF_PROGRAM_PREFIX if (conf.env.AUTOCONF_HOST and conf.env.AUTOCONF_BUILD and conf.env.AUTOCONF_BUILD != conf.env.AUTOCONF_HOST): Logs.error('ERROR: Mismatch between --build and --host. 
Please use --cross-compile instead') sys.exit(1) if conf.env.AUTOCONF_PROGRAM_PREFIX: Logs.error('ERROR: --program-prefix not supported') sys.exit(1) # enable ABI checking for developers conf.env.ABI_CHECK = Options.options.ABI_CHECK or Options.options.developer if Options.options.ABI_CHECK_DISABLE: conf.env.ABI_CHECK = False try: conf.find_program('gdb', mandatory=True) except: conf.env.ABI_CHECK = False conf.env.GIT_LOCAL_CHANGES = Options.options.GIT_LOCAL_CHANGES conf.CHECK_COMMAND(['uname', '-a'], msg='Checking build system', define='BUILD_SYSTEM', on_target=False) conf.CHECK_UNAME() # see if we can compile and run a simple C program conf.CHECK_CODE('printf("hello world")', define='HAVE_SIMPLE_C_PROG', mandatory=True, execute=True, headers='stdio.h', msg='Checking simple C program') # check for rpath if conf.CHECK_LIBRARY_SUPPORT(rpath=True): support_rpath = True conf.env.RPATH_ON_BUILD = not Options.options.disable_rpath_build conf.env.RPATH_ON_INSTALL = (conf.env.RPATH_ON_BUILD and not Options.options.disable_rpath_install) if not conf.env.PRIVATELIBDIR: conf.env.PRIVATELIBDIR = '%s/%s' % (conf.env.LIBDIR, Utils.g_module.APPNAME) conf.env.RPATH_ON_INSTALL_PRIVATE = ( not Options.options.disable_rpath_private_install) else: support_rpath = False conf.env.RPATH_ON_INSTALL = False conf.env.RPATH_ON_BUILD = False conf.env.RPATH_ON_INSTALL_PRIVATE = False if not conf.env.PRIVATELIBDIR: # rpath is not possible so there is no sense in having a # private library directory by default. # the user can of course always override it. 
conf.env.PRIVATELIBDIR = conf.env.LIBDIR if (not Options.options.disable_symbol_versions and conf.CHECK_LIBRARY_SUPPORT(rpath=support_rpath, version_script=True, msg='-Wl,--version-script support')): conf.env.HAVE_LD_VERSION_SCRIPT = True else: conf.env.HAVE_LD_VERSION_SCRIPT = False if sys.platform.startswith('aix'): conf.DEFINE('_ALL_SOURCE', 1, add_to_cflags=True) # Might not be needed if ALL_SOURCE is defined # conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True) # we should use the PIC options in waf instead # Some compilo didn't support -fPIC but just print a warning if conf.env['COMPILER_CC'] == "suncc": conf.ADD_CFLAGS('-KPIC', testflags=True) # we really want define here as we need to have this # define even during the tests otherwise detection of # boolean is broken conf.DEFINE('_STDC_C99', 1, add_to_cflags=True) conf.DEFINE('_XPG6', 1, add_to_cflags=True) else: conf.ADD_CFLAGS('-fPIC', testflags=True) # On Solaris 8 with suncc (at least) the flags for the linker to define the name of the # library are not always working (if the command line is very very long and with a lot # files) if conf.env['COMPILER_CC'] == "suncc": save = conf.env['SONAME_ST'] conf.env['SONAME_ST'] = '-Wl,-h,%s' if not conf.CHECK_SHLIB_INTRASINC_NAME_FLAGS("Checking if flags %s are ok" % conf.env['SONAME_ST']): conf.env['SONAME_ST'] = save conf.CHECK_INLINE() # check for pkgconfig conf.check_cfg(atleast_pkgconfig_version='0.0.0') conf.DEFINE('_GNU_SOURCE', 1, add_to_cflags=True) conf.DEFINE('_XOPEN_SOURCE_EXTENDED', 1, add_to_cflags=True) # get the base headers we'll use for the rest of the tests conf.CHECK_HEADERS('stdio.h sys/types.h sys/stat.h stdlib.h stddef.h memory.h string.h', add_headers=True) conf.CHECK_HEADERS('strings.h inttypes.h stdint.h unistd.h minix/config.h', add_headers=True) conf.CHECK_HEADERS('ctype.h', add_headers=True) if sys.platform != 'darwin': conf.CHECK_HEADERS('standards.h', add_headers=True) conf.CHECK_HEADERS('stdbool.h stdint.h stdarg.h vararg.h', 
add_headers=True) conf.CHECK_HEADERS('limits.h assert.h') # see if we need special largefile flags if not conf.CHECK_LARGEFILE(): raise Utils.WafError('Samba requires large file support support, but not available on this platform: sizeof(off_t) < 8') if 'HAVE_STDDEF_H' in conf.env and 'HAVE_STDLIB_H' in conf.env: conf.DEFINE('STDC_HEADERS', 1) conf.CHECK_HEADERS('sys/time.h time.h', together=True) if 'HAVE_SYS_TIME_H' in conf.env and 'HAVE_TIME_H' in conf.env: conf.DEFINE('TIME_WITH_SYS_TIME', 1) # cope with different extensions for libraries (root, ext) = os.path.splitext(conf.env.shlib_PATTERN) if ext[0] == '.': conf.define('SHLIBEXT', ext[1:], quote=True) else: conf.define('SHLIBEXT', "so", quote=True) conf.CHECK_CODE('long one = 1; return ((char *)(&one))[0]', execute=True, define='WORDS_BIGENDIAN') # check if signal() takes a void function if conf.CHECK_CODE('return *(signal (0, 0)) (0) == 1', define='RETSIGTYPE_INT', execute=False, headers='signal.h', msg='Checking if signal handlers return int'): conf.DEFINE('RETSIGTYPE', 'int') else: conf.DEFINE('RETSIGTYPE', 'void') conf.CHECK_VARIABLE('__FUNCTION__', define='HAVE_FUNCTION_MACRO') conf.CHECK_CODE('va_list ap1,ap2; va_copy(ap1,ap2)', define="HAVE_VA_COPY", msg="Checking for va_copy") conf.CHECK_CODE(''' #define eprintf(...) fprintf(stderr, __VA_ARGS__) eprintf("bla", "bar") ''', define='HAVE__VA_ARGS__MACRO') conf.SAMBA_BUILD_ENV() def build(bld): # give a more useful message if the source directory has moved relpath = os_path_relpath(bld.curdir, bld.srcnode.abspath()) if relpath.find('../') != -1: Logs.error('bld.curdir %s is not a child of %s' % (bld.curdir, bld.srcnode.abspath())) raise Utils.WafError('''The top source directory has moved. 
Please run distclean and reconfigure''') bld.CHECK_MAKEFLAGS() bld.SETUP_BUILD_GROUPS() bld.ENFORCE_GROUP_ORDERING() bld.CHECK_PROJECT_RULES() ntdb-1.0/check.c000066400000000000000000000472531224151530700135350ustar00rootroot00000000000000 /* Trivial Database 2: free list/block handling Copyright (C) Rusty Russell 2010 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "private.h" #include #include /* We keep an ordered array of offsets. */ static bool append(struct ntdb_context *ntdb, ntdb_off_t **arr, size_t *num, ntdb_off_t off) { ntdb_off_t *new; if (*num == 0) { new = ntdb->alloc_fn(ntdb, sizeof(ntdb_off_t), ntdb->alloc_data); } else { new = ntdb->expand_fn(*arr, (*num + 1) * sizeof(ntdb_off_t), ntdb->alloc_data); } if (!new) return false; new[(*num)++] = off; *arr = new; return true; } static enum NTDB_ERROR check_header(struct ntdb_context *ntdb, ntdb_off_t *recovery, uint64_t *features, size_t *num_capabilities) { uint64_t hash_test; struct ntdb_header hdr; enum NTDB_ERROR ecode; ntdb_off_t off, next; ecode = ntdb_read_convert(ntdb, 0, &hdr, sizeof(hdr)); if (ecode != NTDB_SUCCESS) { return ecode; } /* magic food should not be converted, so convert back. 
*/ ntdb_convert(ntdb, hdr.magic_food, sizeof(hdr.magic_food)); hash_test = NTDB_HASH_MAGIC; hash_test = ntdb_hash(ntdb, &hash_test, sizeof(hash_test)); if (hdr.hash_test != hash_test) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "check: hash test %llu should be %llu", (long long)hdr.hash_test, (long long)hash_test); } if (strcmp(hdr.magic_food, NTDB_MAGIC_FOOD) != 0) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "check: bad magic '%.*s'", (unsigned)sizeof(hdr.magic_food), hdr.magic_food); } /* Features which are used must be a subset of features offered. */ if (hdr.features_used & ~hdr.features_offered) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "check: features used (0x%llx) which" " are not offered (0x%llx)", (long long)hdr.features_used, (long long)hdr.features_offered); } *features = hdr.features_offered; *recovery = hdr.recovery; if (*recovery) { if (*recovery < sizeof(hdr) || *recovery > ntdb->file->map_size) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check:" " invalid recovery offset %zu", (size_t)*recovery); } } for (off = hdr.capabilities; off && ecode == NTDB_SUCCESS; off = next) { const struct ntdb_capability *cap; enum NTDB_ERROR e; cap = ntdb_access_read(ntdb, off, sizeof(*cap), true); if (NTDB_PTR_IS_ERR(cap)) { return NTDB_PTR_ERR(cap); } /* All capabilities are unknown. */ e = unknown_capability(ntdb, "ntdb_check", cap->type); next = cap->next; ntdb_access_release(ntdb, cap); if (e) return e; (*num_capabilities)++; } /* Don't check reserved: they *can* be used later. */ return NTDB_SUCCESS; } static int off_cmp(const ntdb_off_t *a, const ntdb_off_t *b) { /* Can overflow an int. */ return *a > *b ? 1 : *a < *b ? 
-1 : 0; } static enum NTDB_ERROR check_entry(struct ntdb_context *ntdb, ntdb_off_t off_and_hash, ntdb_len_t bucket, ntdb_off_t used[], size_t num_used, size_t *num_found, enum NTDB_ERROR (*check)(NTDB_DATA, NTDB_DATA, void *), void *data) { enum NTDB_ERROR ecode; const struct ntdb_used_record *r; const unsigned char *kptr; ntdb_len_t klen, dlen; uint32_t hash; ntdb_off_t off = off_and_hash & NTDB_OFF_MASK; ntdb_off_t *p; /* Empty bucket is fine. */ if (!off_and_hash) { return NTDB_SUCCESS; } /* This can't point to a chain, we handled those at toplevel. */ if (off_and_hash & (1ULL << NTDB_OFF_CHAIN_BIT)) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Invalid chain bit in offset " " %llu", (long long)off_and_hash); } p = asearch(&off, used, num_used, off_cmp); if (!p) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Invalid offset" " %llu in hash", (long long)off); } /* Mark it invalid. */ *p ^= 1; (*num_found)++; r = ntdb_access_read(ntdb, off, sizeof(*r), true); if (NTDB_PTR_IS_ERR(r)) { return NTDB_PTR_ERR(r); } klen = rec_key_length(r); dlen = rec_data_length(r); ntdb_access_release(ntdb, r); kptr = ntdb_access_read(ntdb, off + sizeof(*r), klen + dlen, false); if (NTDB_PTR_IS_ERR(kptr)) { return NTDB_PTR_ERR(kptr); } hash = ntdb_hash(ntdb, kptr, klen); /* Are we in the right chain? */ if (bits_from(hash, 0, ntdb->hash_bits) != bucket) { ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Bad bucket %u vs %llu", bits_from(hash, 0, ntdb->hash_bits), (long long)bucket); /* Next 8 bits should be the same as top bits of bucket. 
*/ } else if (bits_from(hash, ntdb->hash_bits, NTDB_OFF_UPPER_STEAL) != bits_from(off_and_hash, 64-NTDB_OFF_UPPER_STEAL, NTDB_OFF_UPPER_STEAL)) { ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Bad hash bits %llu vs %llu", (long long)off_and_hash, (long long)hash); } else if (check) { NTDB_DATA k, d; k = ntdb_mkdata(kptr, klen); d = ntdb_mkdata(kptr + klen, dlen); ecode = check(k, d, data); } else { ecode = NTDB_SUCCESS; } ntdb_access_release(ntdb, kptr); return ecode; } static enum NTDB_ERROR check_hash_chain(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_len_t bucket, ntdb_off_t used[], size_t num_used, size_t *num_found, enum NTDB_ERROR (*check)(NTDB_DATA, NTDB_DATA, void *), void *data) { struct ntdb_used_record rec; enum NTDB_ERROR ecode; const ntdb_off_t *entries; ntdb_len_t i, num; /* This is a used entry. */ (*num_found)++; ecode = ntdb_read_convert(ntdb, off, &rec, sizeof(rec)); if (ecode != NTDB_SUCCESS) { return ecode; } if (rec_magic(&rec) != NTDB_CHAIN_MAGIC) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Bad hash chain magic %llu", (long long)rec_magic(&rec)); } if (rec_data_length(&rec) % sizeof(ntdb_off_t)) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Bad hash chain data length %llu", (long long)rec_data_length(&rec)); } if (rec_key_length(&rec) != 0) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Bad hash chain key length %llu", (long long)rec_key_length(&rec)); } off += sizeof(rec); num = rec_data_length(&rec) / sizeof(ntdb_off_t); entries = ntdb_access_read(ntdb, off, rec_data_length(&rec), true); if (NTDB_PTR_IS_ERR(entries)) { return NTDB_PTR_ERR(entries); } /* Check each non-deleted entry in chain. 
*/ for (i = 0; i < num; i++) { ecode = check_entry(ntdb, entries[i], bucket, used, num_used, num_found, check, data); if (ecode) { break; } } ntdb_access_release(ntdb, entries); return ecode; } static enum NTDB_ERROR check_hash(struct ntdb_context *ntdb, ntdb_off_t used[], size_t num_used, size_t num_other_used, enum NTDB_ERROR (*check)(NTDB_DATA, NTDB_DATA, void *), void *data) { enum NTDB_ERROR ecode; struct ntdb_used_record rec; const ntdb_off_t *entries; ntdb_len_t i; /* Free tables and capabilities also show up as used, as do we. */ size_t num_found = num_other_used + 1; ecode = ntdb_read_convert(ntdb, NTDB_HASH_OFFSET, &rec, sizeof(rec)); if (ecode != NTDB_SUCCESS) { return ecode; } if (rec_magic(&rec) != NTDB_HTABLE_MAGIC) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Bad hash table magic %llu", (long long)rec_magic(&rec)); } if (rec_data_length(&rec) != (sizeof(ntdb_off_t) << ntdb->hash_bits)) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Bad hash table data length %llu", (long long)rec_data_length(&rec)); } if (rec_key_length(&rec) != 0) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Bad hash table key length %llu", (long long)rec_key_length(&rec)); } entries = ntdb_access_read(ntdb, NTDB_HASH_OFFSET + sizeof(rec), rec_data_length(&rec), true); if (NTDB_PTR_IS_ERR(entries)) { return NTDB_PTR_ERR(entries); } for (i = 0; i < (1 << ntdb->hash_bits); i++) { ntdb_off_t off = entries[i] & NTDB_OFF_MASK; if (entries[i] & (1ULL << NTDB_OFF_CHAIN_BIT)) { ecode = check_hash_chain(ntdb, off, i, used, num_used, &num_found, check, data); } else { ecode = check_entry(ntdb, entries[i], i, used, num_used, &num_found, check, data); } if (ecode) { break; } } ntdb_access_release(ntdb, entries); if (ecode == NTDB_SUCCESS && num_found != num_used) { ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Not all entries are in hash"); } return ecode; } static enum NTDB_ERROR 
check_free(struct ntdb_context *ntdb, ntdb_off_t off, const struct ntdb_free_record *frec, ntdb_off_t prev, unsigned int ftable, unsigned int bucket) { enum NTDB_ERROR ecode; if (frec_magic(frec) != NTDB_FREE_MAGIC) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: offset %llu bad magic 0x%llx", (long long)off, (long long)frec->magic_and_prev); } if (frec_ftable(frec) != ftable) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: offset %llu bad freetable %u", (long long)off, frec_ftable(frec)); } ecode = ntdb_oob(ntdb, off, frec_len(frec) + sizeof(struct ntdb_used_record), false); if (ecode != NTDB_SUCCESS) { return ecode; } if (size_to_bucket(frec_len(frec)) != bucket) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: offset %llu in wrong bucket" " (%u vs %u)", (long long)off, bucket, size_to_bucket(frec_len(frec))); } if (prev && prev != frec_prev(frec)) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: offset %llu bad prev" " (%llu vs %llu)", (long long)off, (long long)prev, (long long)frec_len(frec)); } return NTDB_SUCCESS; } static enum NTDB_ERROR check_free_table(struct ntdb_context *ntdb, ntdb_off_t ftable_off, unsigned ftable_num, ntdb_off_t fr[], size_t num_free, size_t *num_found) { struct ntdb_freetable ft; ntdb_off_t h; unsigned int i; enum NTDB_ERROR ecode; ecode = ntdb_read_convert(ntdb, ftable_off, &ft, sizeof(ft)); if (ecode != NTDB_SUCCESS) { return ecode; } if (rec_magic(&ft.hdr) != NTDB_FTABLE_MAGIC || rec_key_length(&ft.hdr) != 0 || rec_data_length(&ft.hdr) != sizeof(ft) - sizeof(ft.hdr)) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Invalid header on free table"); } for (i = 0; i < NTDB_FREE_BUCKETS; i++) { ntdb_off_t off, prev = 0, *p, first = 0; struct ntdb_free_record f; h = bucket_off(ftable_off, i); for (off = ntdb_read_off(ntdb, h); off; off = f.next) { if (NTDB_OFF_IS_ERR(off)) { return NTDB_OFF_TO_ERR(off); } if 
(!first) { off &= NTDB_OFF_MASK; first = off; } ecode = ntdb_read_convert(ntdb, off, &f, sizeof(f)); if (ecode != NTDB_SUCCESS) { return ecode; } ecode = check_free(ntdb, off, &f, prev, ftable_num, i); if (ecode != NTDB_SUCCESS) { return ecode; } /* FIXME: Check hash bits */ p = asearch(&off, fr, num_free, off_cmp); if (!p) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Invalid offset" " %llu in free table", (long long)off); } /* Mark it invalid. */ *p ^= 1; (*num_found)++; prev = off; } if (first) { /* Now we can check first back pointer. */ ecode = ntdb_read_convert(ntdb, first, &f, sizeof(f)); if (ecode != NTDB_SUCCESS) { return ecode; } ecode = check_free(ntdb, first, &f, prev, ftable_num, i); if (ecode != NTDB_SUCCESS) { return ecode; } } } return NTDB_SUCCESS; } /* Slow, but should be very rare. */ ntdb_off_t dead_space(struct ntdb_context *ntdb, ntdb_off_t off) { size_t len; enum NTDB_ERROR ecode; for (len = 0; off + len < ntdb->file->map_size; len++) { char c; ecode = ntdb->io->tread(ntdb, off, &c, 1); if (ecode != NTDB_SUCCESS) { return NTDB_ERR_TO_OFF(ecode); } if (c != 0 && c != 0x43) break; } return len; } static enum NTDB_ERROR check_linear(struct ntdb_context *ntdb, ntdb_off_t **used, size_t *num_used, ntdb_off_t **fr, size_t *num_free, uint64_t features, ntdb_off_t recovery) { ntdb_off_t off; ntdb_len_t len; enum NTDB_ERROR ecode; bool found_recovery = false; for (off = sizeof(struct ntdb_header); off < ntdb->file->map_size; off += len) { union { struct ntdb_used_record u; struct ntdb_free_record f; struct ntdb_recovery_record r; } rec; /* r is larger: only get that if we need to. */ ecode = ntdb_read_convert(ntdb, off, &rec, sizeof(rec.f)); if (ecode != NTDB_SUCCESS) { return ecode; } /* If we crash after ftruncate, we can get zeroes or fill. 
*/ if (rec.r.magic == NTDB_RECOVERY_INVALID_MAGIC || rec.r.magic == 0x4343434343434343ULL) { ecode = ntdb_read_convert(ntdb, off, &rec, sizeof(rec.r)); if (ecode != NTDB_SUCCESS) { return ecode; } if (recovery == off) { found_recovery = true; len = sizeof(rec.r) + rec.r.max_len; } else { len = dead_space(ntdb, off); if (NTDB_OFF_IS_ERR(len)) { return NTDB_OFF_TO_ERR(len); } if (len < sizeof(rec.r)) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: invalid" " dead space at %zu", (size_t)off); } ntdb_logerr(ntdb, NTDB_SUCCESS, NTDB_LOG_WARNING, "Dead space at %zu-%zu (of %zu)", (size_t)off, (size_t)(off + len), (size_t)ntdb->file->map_size); } } else if (rec.r.magic == NTDB_RECOVERY_MAGIC) { ecode = ntdb_read_convert(ntdb, off, &rec, sizeof(rec.r)); if (ecode != NTDB_SUCCESS) { return ecode; } if (recovery != off) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: unexpected" " recovery record at offset" " %zu", (size_t)off); } if (rec.r.len > rec.r.max_len) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: invalid recovery" " length %zu", (size_t)rec.r.len); } if (rec.r.eof > ntdb->file->map_size) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: invalid old EOF" " %zu", (size_t)rec.r.eof); } found_recovery = true; len = sizeof(rec.r) + rec.r.max_len; } else if (frec_magic(&rec.f) == NTDB_FREE_MAGIC) { len = sizeof(rec.u) + frec_len(&rec.f); if (off + len > ntdb->file->map_size) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: free overlength" " %llu at offset %llu", (long long)len, (long long)off); } /* This record should be in free lists. 
*/ if (frec_ftable(&rec.f) != NTDB_FTABLE_NONE && !append(ntdb, fr, num_free, off)) { return ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "ntdb_check: tracking %zu'th" " free record.", *num_free); } } else if (rec_magic(&rec.u) == NTDB_USED_MAGIC || rec_magic(&rec.u) == NTDB_CHAIN_MAGIC || rec_magic(&rec.u) == NTDB_HTABLE_MAGIC || rec_magic(&rec.u) == NTDB_FTABLE_MAGIC || rec_magic(&rec.u) == NTDB_CAP_MAGIC) { uint64_t klen, dlen, extra; /* This record is used! */ if (!append(ntdb, used, num_used, off)) { return ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "ntdb_check: tracking %zu'th" " used record.", *num_used); } klen = rec_key_length(&rec.u); dlen = rec_data_length(&rec.u); extra = rec_extra_padding(&rec.u); len = sizeof(rec.u) + klen + dlen + extra; if (off + len > ntdb->file->map_size) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: used overlength" " %llu at offset %llu", (long long)len, (long long)off); } if (len < sizeof(rec.f)) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: too short record" " %llu at %llu", (long long)len, (long long)off); } /* Check that records have correct 0 at end (but may * not in future). */ if (extra && !features && rec_magic(&rec.u) != NTDB_CAP_MAGIC) { const char *p; char c; p = ntdb_access_read(ntdb, off + sizeof(rec.u) + klen + dlen, 1, false); if (NTDB_PTR_IS_ERR(p)) return NTDB_PTR_ERR(p); c = *p; ntdb_access_release(ntdb, p); if (c != '\0') { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check:" " non-zero extra" " at %llu", (long long)off); } } } else { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Bad magic 0x%llx" " at offset %zu", (long long)rec_magic(&rec.u), (size_t)off); } } /* We must have found recovery area if there was one. 
*/ if (recovery != 0 && !found_recovery) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: expected a recovery area at %zu", (size_t)recovery); } return NTDB_SUCCESS; } _PUBLIC_ enum NTDB_ERROR ntdb_check_(struct ntdb_context *ntdb, enum NTDB_ERROR (*check)(NTDB_DATA, NTDB_DATA, void *), void *data) { ntdb_off_t *fr = NULL, *used = NULL; ntdb_off_t ft = 0, recovery = 0; size_t num_free = 0, num_used = 0, num_found = 0, num_ftables = 0, num_capabilities = 0; uint64_t features = 0; enum NTDB_ERROR ecode; if (ntdb->flags & NTDB_CANT_CHECK) { return ntdb_logerr(ntdb, NTDB_SUCCESS, NTDB_LOG_WARNING, "ntdb_check: database has unknown capability," " cannot check."); } ecode = ntdb_allrecord_lock(ntdb, F_RDLCK, NTDB_LOCK_WAIT, false); if (ecode != NTDB_SUCCESS) { return ecode; } ecode = ntdb_lock_expand(ntdb, F_RDLCK); if (ecode != NTDB_SUCCESS) { ntdb_allrecord_unlock(ntdb, F_RDLCK); return ecode; } ecode = check_header(ntdb, &recovery, &features, &num_capabilities); if (ecode != NTDB_SUCCESS) goto out; /* First we do a linear scan, checking all records. */ ecode = check_linear(ntdb, &used, &num_used, &fr, &num_free, features, recovery); if (ecode != NTDB_SUCCESS) goto out; for (ft = first_ftable(ntdb); ft; ft = next_ftable(ntdb, ft)) { if (NTDB_OFF_IS_ERR(ft)) { ecode = NTDB_OFF_TO_ERR(ft); goto out; } ecode = check_free_table(ntdb, ft, num_ftables, fr, num_free, &num_found); if (ecode != NTDB_SUCCESS) goto out; num_ftables++; } /* FIXME: Check key uniqueness? 
*/ ecode = check_hash(ntdb, used, num_used, num_ftables + num_capabilities, check, data); if (ecode != NTDB_SUCCESS) goto out; if (num_found != num_free) { ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_check: Not all entries are in" " free table"); } out: ntdb_allrecord_unlock(ntdb, F_RDLCK); ntdb_unlock_expand(ntdb, F_RDLCK); ntdb->free_fn(fr, ntdb->alloc_data); ntdb->free_fn(used, ntdb->alloc_data); return ecode; } ntdb-1.0/configure000077500000000000000000000006501224151530700142110ustar00rootroot00000000000000#!/bin/sh PREVPATH=`dirname $0` if [ -f $PREVPATH/../../buildtools/bin/waf ]; then WAF=../../buildtools/bin/waf elif [ -f $PREVPATH/buildtools/bin/waf ]; then WAF=./buildtools/bin/waf else echo "replace: Unable to find waf" exit 1 fi # using JOBS=1 gives maximum compatibility with # systems like AIX which have broken threading in python JOBS=1 export JOBS cd . || exit 1 $WAF configure "$@" || exit 1 cd $PREVPATH ntdb-1.0/doc/000077500000000000000000000000001224151530700130465ustar00rootroot00000000000000ntdb-1.0/doc/TDB_porting.txt000066400000000000000000000276011224151530700157700ustar00rootroot00000000000000Interface differences between TDB and NTDB. - ntdb shares 'struct TDB_DATA' with tdb, but TDB defines the TDB_DATA typedef, whereas ntdb defines NTDB_DATA (ie. both are compatible). If you include both ntdb.h and tdb.h, #include tdb.h first, otherwise you'll get a compile error when tdb.h re-defined struct TDB_DATA. Example: #include #include - ntdb functions return NTDB_SUCCESS (ie 0) on success, and a negative error on failure, whereas tdb functions returned 0 on success, and -1 on failure. tdb then used tdb_error() to determine the error; this API is nasty if we ever want to support threads, so is not supported. 
Example: #include #include void tdb_example(struct tdb_context *tdb, TDB_DATA key, TDB_DATA d) { if (tdb_store(tdb, key, d) == -1) { printf("store failed: %s\n", tdb_errorstr(tdb)); } } void ntdb_example(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA d) { enum NTDB_ERROR e; e = ntdb_store(ntdb, key, d); if (e) { printf("store failed: %s\n", ntdb_errorstr(e)); } } - ntdb's ntdb_fetch() returns an error, tdb's returned the data directly (or tdb_null, and you were supposed to check tdb_error() to find out why). Example: #include #include void tdb_example(struct tdb_context *tdb, TDB_DATA key) { TDB_DATA data; data = tdb_fetch(tdb, key); if (!data.dptr) { printf("fetch failed: %s\n", tdb_errorstr(tdb)); } } void ntdb_example(struct ntdb_context *ntdb, NTDB_DATA key) { NTDB_DATA data; enum NTDB_ERROR e; e = ntdb_fetch(ntdb, key, &data); if (e) { printf("fetch failed: %s\n", ntdb_errorstr(e)); } } - ntdb's ntdb_nextkey() frees the old key's dptr, in tdb you needed to do this manually. Example: #include #include void tdb_example(struct tdb_context *tdb) { TDB_DATA key, next, data; for (key = tdb_firstkey(tdb); key.dptr; key = next) { printf("Got key!\n"); next = tdb_nextkey(tdb, key); free(key.dptr); } } void ntdb_example(struct ntdb_context *ntdb) { NTDB_DATA k, data; enum NTDB_ERROR e; for (e = ntdb_firstkey(ntdb,&k); !e; e = ntdb_nextkey(ntdb,&k)) printf("Got key!\n"); } - Unlike tdb_open/tdb_open_ex, ntdb_open does not allow NULL names, even for NTDB_INTERNAL dbs, and thus ntdb_name() never returns NULL. Example: #include #include struct tdb_context *tdb_example(void) { return tdb_open(NULL, 0, TDB_INTERNAL, O_RDWR, 0); } struct ntdb_context *ntdb_example(void) { return ntdb_open("example", NTDB_INTERNAL, O_RDWR, 0); } - ntdb uses a linked list of attribute structures to implement logging and alternate hashes. tdb used tdb_open_ex, which was not extensible. 
Example: #include #include /* Custom hash function */ static unsigned int my_tdb_hash_func(TDB_DATA *key) { return key->dsize; } struct tdb_context *tdb_example(void) { return tdb_open_ex("example.tdb", 0, TDB_DEFAULT, O_CREAT|O_RDWR, 0600, NULL, my_hash_func); } /* Custom hash function */ static unsigned int my_ntdb_hash_func(const void *key, size_t len, uint32_t seed, void *data) { return len; } struct ntdb_context *ntdb_example(void) { union ntdb_attribute hash; hash.base.attr = NTDB_ATTRIBUTE_HASH; hash.base.next = NULL; hash.hash.fn = my_ntdb_hash_func; return ntdb_open("example.ntdb", NTDB_DEFAULT, O_CREAT|O_RDWR, 0600, &hash); } - tdb's tdb_open/tdb_open_ex took an explicit hash size, defaulting to 131. ntdb's uses an attribute for this, defaulting to 8192. Example: #include #include struct tdb_context *tdb_example(void) { return tdb_open("example.tdb", 10007, TDB_DEFAULT, O_CREAT|O_RDWR, 0600); } struct ntdb_context *ntdb_example(void) { union ntdb_attribute hashsize; hashsize.base.attr = NTDB_ATTRIBUTE_HASHSIZE; hashsize.base.next = NULL; hashsize.hashsize.size = 16384; return ntdb_open("example.ntdb", NTDB_DEFAULT, O_CREAT|O_RDWR, 0600, &hashsize); } - ntdb's log function is simpler than tdb's log function. The string is already formatted, is not terminated by a '\n', and it takes an enum ntdb_log_level not a tdb_debug_level, and which has only three values: NTDB_LOG_ERROR, NTDB_LOG_USE_ERROR and NTDB_LOG_WARNING. #include #include static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) { va_list ap; const char *name; switch (level) { case TDB_DEBUG_FATAL: fprintf(stderr, "FATAL: "); break; case TDB_DEBUG_ERROR: fprintf(stderr, "ERROR: "); break; case TDB_DEBUG_WARNING: fprintf(stderr, "WARNING: "); break; case TDB_DEBUG_TRACE: /* Don't print out tracing. 
*/ return; } name = tdb_name(tdb); if (!name) { name = "unnamed"; } fprintf(stderr, "tdb(%s):", name); va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); } struct tdb_context *tdb_example(void) { struct tdb_logging_context lctx; lctx.log_fn = tdb_log; return tdb_open_ex("example.tdb", 0, TDB_DEFAULT, O_CREAT|O_RDWR, 0600, &lctx, NULL); } static void ntdb_log(struct ntdb_context *ntdb, enum ntdb_log_level level, enum NTDB_ERROR ecode, const char *message, void *data) { switch (level) { case NTDB_LOG_ERROR: fprintf(stderr, "ERROR: "); break; case NTDB_LOG_USE_ERROR: /* We made a mistake, so abort. */ abort(); break; case NTDB_LOG_WARNING: fprintf(stderr, "WARNING: "); break; } fprintf(stderr, "ntdb(%s):%s:%s\n", ntdb_name(ntdb), ntdb_errorstr(ecode), message); } struct ntdb_context *ntdb_example(void) { union ntdb_attribute log; log.base.attr = NTDB_ATTRIBUTE_LOG; log.base.next = NULL; log.log.fn = ntdb_log; return ntdb_open("example.ntdb", NTDB_DEFAULT, O_CREAT|O_RDWR, 0600, &log); } - ntdb provides ntdb_deq() for comparing two NTDB_DATA, and ntdb_mkdata() for creating an NTDB_DATA. #include #include void tdb_example(struct tdb_context *tdb) { TDB_DATA data, key; key.dsize = strlen("hello"); key.dptr = "hello"; data = tdb_fetch(tdb, key); if (data.dsize == key.dsize && !memcmp(data.dptr, key.dptr, key.dsize)) printf("key is same as data\n"); } free(data.dptr); } void ntdb_example(struct ntdb_context *ntdb) { NTDB_DATA data, key; key = ntdb_mkdata("hello", strlen("hello")); if (ntdb_fetch(ntdb, key, &data) == NTDB_SUCCESS) { if (ntdb_deq(key, data)) { printf("key is same as data\n"); } free(data.dptr); } } - ntdb's ntdb_parse_record() takes a type-checked callback data pointer, not a void * (though a void * pointer still works). The callback function is allowed to do read operations on the database, or write operations if you first call ntdb_lockall(). TDB's tdb_parse_record() did not allow any database access within the callback, could crash if you tried. 
Example: #include #include static int tdb_parser(TDB_DATA key, TDB_DATA data, void *private_data) { TDB_DATA *expect = private_data; return data.dsize == expect->dsize && !memcmp(data.dptr, expect->dptr, data.dsize); } void tdb_example(struct tdb_context *tdb, TDB_DATA key, NTDB_DATA d) { switch (tdb_parse_record(tdb, key, tdb_parser, &d)) { case -1: printf("parse failed: %s\n", tdb_errorstr(tdb)); break; case 0: printf("data was different!\n"); break; case 1: printf("data was same!\n"); break; } } static int ntdb_parser(TDB_DATA key, TDB_DATA data, TDB_DATA *expect) { return ntdb_deq(data, *expect); } void ntdb_example(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA d) { enum NTDB_ERROR e; e = tdb_parse_record(tdb, key, tdb_parser, &d); switch (e) { case 0: printf("data was different!\n"); break; case 1: printf("data was same!\n"); break; default: printf("parse failed: %s\n", ntdb_errorstr(e)); break; } } - ntdb does locking on read-only databases (ie. O_RDONLY passed to ntdb_open). tdb did not: use the NTDB_NOLOCK flag if you want to suppress locking. Example: #include #include struct tdb_context *tdb_example(void) { return tdb_open("example.tdb", 0, TDB_DEFAULT, O_RDONLY, 0); } struct ntdb_context *ntdb_example(void) { return ntdb_open("example.ntdb", NTDB_NOLOCK, O_RDONLY, NULL); } - Failure inside a transaction (such as a lock function failing) does not implicitly cancel the transaction; you still need to call ntdb_transaction_cancel(). 
#include #include void tdb_example(struct tdb_context *tdb, TDB_DATA key, TDB_DATA d) { if (tdb_transaction_start(tdb) == -1) { printf("transaction failed: %s\n", tdb_errorstr(tdb)); return; } if (tdb_store(tdb, key, d) == -1) { printf("store failed: %s\n", tdb_errorstr(tdb)); return; } if (tdb_transaction_commit(tdb) == -1) { printf("commit failed: %s\n", tdb_errorstr(tdb)); } } void ntdb_example(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA d) { enum NTDB_ERROR e; e = ntdb_transaction_start(ntdb); if (e) { printf("transaction failed: %s\n", ntdb_errorstr(e)); return; } e = ntdb_store(ntdb, key, d); if (e) { printf("store failed: %s\n", ntdb_errorstr(e)); ntdb_transaction_cancel(ntdb); } e = ntdb_transaction_commit(ntdb); if (e) { printf("commit failed: %s\n", ntdb_errorstr(e)); } } - There is no NTDB_CLEAR_IF_FIRST flag; it has severe scalability and API problems. If necessary, you can emulate this by using the open hook and placing a 1-byte lock at offset 4. If your program forks and exits, you will need to place this lock again in the child before the parent exits. Example: #include #include struct tdb_context *tdb_example(void) { return tdb_open("example.tdb", 0, TDB_CLEAR_IF_FIRST, O_CREAT|O_RDWR, 0600); } static enum NTDB_ERROR clear_if_first(int fd, void *unused) { /* We hold a lock offset 4 always, so we can tell if * anyone else is. */ struct flock fl; fl.l_type = F_WRLCK; fl.l_whence = SEEK_SET; fl.l_start = 4; /* ACTIVE_LOCK */ fl.l_len = 1; if (fcntl(fd, F_SETLK, &fl) == 0) { /* We must be first ones to open it! Clear it. 
*/ if (ftruncate(fd, 0) != 0) { return NTDB_ERR_IO; } } fl.l_type = F_RDLCK; if (fcntl(fd, F_SETLKW, &fl) != 0) { return NTDB_ERR_IO; } return NTDB_SUCCESS; } struct ntdb_context *ntdb_example(void) { union ntdb_attribute open_attr; open_attr.openhook.base.attr = NTDB_ATTRIBUTE_OPENHOOK; open_attr.openhook.base.next = NULL; open_attr.openhook.fn = clear_if_first; return ntdb_open("example.ntdb", NTDB_DEFAULT, O_CREAT|O_RDWR, 0600, &open_attr); } - ntdb traversals are not reliable if the database is changed during the traversal, ie your traversal may not cover all elements, or may cover elements multiple times. As a special exception, deleting the current record within ntdb_traverse() is reliable. - There is no ntdb_traverse_read, since ntdb_traverse does not hold a lock across the entire traversal anyway. If you want to make sure that your traversal function does not write to the database, you can set and clear the NTDB_RDONLY flag around the traversal. - ntdb does not need tdb_reopen() or tdb_reopen_all(). If you call fork() after during certain operations the child should close the ntdb, or complete the operations before continuing to use the tdb: ntdb_transaction_start(): child must ntdb_transaction_cancel() ntdb_lockall(): child must call ntdb_unlockall() ntdb_lockall_read(): child must call ntdb_unlockall_read() ntdb_chainlock(): child must call ntdb_chainunlock() ntdb_parse() callback: child must return from ntdb_parse() - ntdb will not open a non-ntdb file, even if O_CREAT is specified. tdb will overwrite an unknown file in that case. ntdb-1.0/doc/design.lyx000066400000000000000000002034071224151530700150630ustar00rootroot00000000000000#LyX 2.0 created this file. 
For more info see http://www.lyx.org/ \lyxformat 413 \begin_document \begin_header \textclass article \use_default_options true \maintain_unincluded_children false \language english \language_package default \inputencoding auto \fontencoding global \font_roman default \font_sans default \font_typewriter default \font_default_family default \use_non_tex_fonts false \font_sc false \font_osf false \font_sf_scale 100 \font_tt_scale 100 \graphics default \default_output_format default \output_sync 0 \bibtex_command default \index_command default \paperfontsize default \use_hyperref false \papersize default \use_geometry false \use_amsmath 1 \use_esint 1 \use_mhchem 1 \use_mathdots 1 \cite_engine basic \use_bibtopic false \use_indices false \paperorientation portrait \suppress_date false \use_refstyle 0 \index Index \shortcut idx \color #008000 \end_index \secnumdepth 3 \tocdepth 3 \paragraph_separation indent \paragraph_indentation default \quotes_language english \papercolumns 1 \papersides 1 \paperpagestyle default \tracking_changes true \output_changes true \html_math_output 0 \html_css_as_file 0 \html_be_strict false \end_header \begin_body \begin_layout Title NTDB: Redesigning The Trivial DataBase \end_layout \begin_layout Author Rusty Russell, IBM Corporation \end_layout \begin_layout Date 19 June 2012 \end_layout \begin_layout Abstract The Trivial DataBase on-disk format is 32 bits; with usage cases heading towards the 4G limit, that must change. This required breakage provides an opportunity to revisit TDB's other design decisions and reassess them. \end_layout \begin_layout Section Introduction \end_layout \begin_layout Standard The Trivial DataBase was originally written by Andrew Tridgell as a simple key/data pair storage system with the same API as dbm, but allowing multiple readers and writers while being small enough (< 1000 lines of C) to include in SAMBA. 
The simple design created in 1999 has proven surprisingly robust and performant , used in Samba versions 3 and 4 as well as numerous other projects. Its useful life was greatly increased by the (backwards-compatible!) addition of transaction support in 2005. \end_layout \begin_layout Standard The wider variety and greater demands of TDB-using code has lead to some organic growth of the API, as well as some compromises on the implementation. None of these, by themselves, are seen as show-stoppers, but the cumulative effect is to a loss of elegance over the initial, simple TDB implementation. Here is a table of the approximate number of lines of implementation code and number of API functions at the end of each year: \end_layout \begin_layout Standard \begin_inset Tabular \begin_inset Text \begin_layout Plain Layout Year End \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout API Functions \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout Lines of C Code Implementation \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 1999 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 13 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 1195 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2000 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 24 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 1725 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2001 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 32 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2228 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2002 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 35 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2481 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2003 \end_layout \end_inset \begin_inset Text 
\begin_layout Plain Layout 35 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2552 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2004 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 40 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2584 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2005 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 38 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2647 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2006 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 52 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 3754 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2007 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 66 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 4398 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2008 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 71 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 4768 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 2009 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 73 \end_layout \end_inset \begin_inset Text \begin_layout Plain Layout 5715 \end_layout \end_inset \end_inset \end_layout \begin_layout Standard This review is an attempt to catalog and address all the known issues with TDB and create solutions which address the problems without significantly increasing complexity; all involved are far too aware of the dangers of second system syndrome in rewriting a successful project like this. \end_layout \begin_layout Standard Note: the final decision was to make ntdb a separate library, with a separarate 'ntdb' namespace so both can potentially be linked together. 
This document still refers to \begin_inset Quotes eld \end_inset tdb \begin_inset Quotes erd \end_inset everywhere, for simplicity. \end_layout \begin_layout Section API Issues \end_layout \begin_layout Subsection tdb_open_ex Is Not Expandable \end_layout \begin_layout Standard The tdb_open() call was expanded to tdb_open_ex(), which added an optional hashing function and an optional logging function argument. Additional arguments to open would require the introduction of a tdb_open_ex2 call etc. \end_layout \begin_layout Subsubsection Proposed Solution \begin_inset CommandInset label LatexCommand label name "attributes" \end_inset \end_layout \begin_layout Standard tdb_open() will take a linked-list of attributes: \end_layout \begin_layout LyX-Code enum tdb_attribute { \end_layout \begin_layout LyX-Code TDB_ATTRIBUTE_LOG = 0, \end_layout \begin_layout LyX-Code TDB_ATTRIBUTE_HASH = 1 \end_layout \begin_layout LyX-Code }; \end_layout \begin_layout LyX-Code struct tdb_attribute_base { \end_layout \begin_layout LyX-Code enum tdb_attribute attr; \end_layout \begin_layout LyX-Code union tdb_attribute *next; \end_layout \begin_layout LyX-Code }; \end_layout \begin_layout LyX-Code struct tdb_attribute_log { \end_layout \begin_layout LyX-Code struct tdb_attribute_base base; /* .attr = TDB_ATTRIBUTE_LOG */ \end_layout \begin_layout LyX-Code tdb_log_func log_fn; \end_layout \begin_layout LyX-Code void *log_private; \end_layout \begin_layout LyX-Code }; \end_layout \begin_layout LyX-Code struct tdb_attribute_hash { \end_layout \begin_layout LyX-Code struct tdb_attribute_base base; /* .attr = TDB_ATTRIBUTE_HASH */ \end_layout \begin_layout LyX-Code tdb_hash_func hash_fn; \end_layout \begin_layout LyX-Code void *hash_private; \end_layout \begin_layout LyX-Code }; \end_layout \begin_layout LyX-Code union tdb_attribute { \end_layout \begin_layout LyX-Code struct tdb_attribute_base base; \end_layout \begin_layout LyX-Code struct tdb_attribute_log log; \end_layout \begin_layout 
LyX-Code struct tdb_attribute_hash hash; \end_layout \begin_layout LyX-Code }; \end_layout \begin_layout Standard This allows future attributes to be added, even if this expands the size of the union. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. \end_layout \begin_layout Subsection tdb_traverse Makes Impossible Guarantees \end_layout \begin_layout Standard tdb_traverse (and tdb_firstkey/tdb_nextkey) predate transactions, and it was thought that it was important to guarantee that all records which exist at the start and end of the traversal would be included, and no record would be included twice. \end_layout \begin_layout Standard This adds complexity (see \begin_inset CommandInset ref LatexCommand ref reference "Reliable-Traversal-Adds" \end_inset ) and does not work anyway for records which are altered (in particular, those which are expanded may be effectively deleted and re-added behind the traversal). \end_layout \begin_layout Subsubsection \begin_inset CommandInset label LatexCommand label name "traverse-Proposed-Solution" \end_inset Proposed Solution \end_layout \begin_layout Standard Abandon the guarantee. You will see every record if no changes occur during your traversal, otherwise you will see some subset. You can prevent changes by using a transaction or the locking API. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. Delete-during-traverse will still delete every record, too (assuming no other changes). \end_layout \begin_layout Subsection Nesting of Transactions Is Fraught \end_layout \begin_layout Standard TDB has alternated between allowing nested transactions and not allowing them. Various paths in the Samba codebase assume that transactions will nest, and in a sense they can: the operation is only committed to disk when the outer transaction is committed. 
There are two problems, however: \end_layout \begin_layout Enumerate Canceling the inner transaction will cause the outer transaction commit to fail, and will not undo any operations since the inner transaction began. This problem is soluble with some additional internal code. \end_layout \begin_layout Enumerate An inner transaction commit can be cancelled by the outer transaction. This is desirable in the way which Samba's database initialization code uses transactions, but could be a surprise to any users expecting a successful transaction commit to expose changes to others. \end_layout \begin_layout Standard The current solution is to specify the behavior at tdb_open(), with the default currently that nested transactions are allowed. This flag can also be changed at runtime. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard Given the usage patterns, it seems that the \begin_inset Quotes eld \end_inset least-surprise \begin_inset Quotes erd \end_inset behavior of disallowing nested transactions should become the default. Additionally, it seems the outer transaction is the only code which knows whether inner transactions should be allowed, so a flag to indicate this could be added to tdb_transaction_start. However, this behavior can be simulated with a wrapper which uses tdb_add_flags () and tdb_remove_flags(), so the API should not be expanded for this relatively -obscure case. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete; the nesting flag has been removed. \end_layout \begin_layout Subsection Incorrect Hash Function is Not Detected \end_layout \begin_layout Standard tdb_open_ex() allows the calling code to specify a different hash function to use, but does not check that all other processes accessing this tdb are using the same hash function. The result is that records are missing from tdb_fetch(). 
\end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard The header should contain an example hash result (eg. the hash of 0xdeadbeef), and tdb_open_ex() should check that the given hash function produces the same answer, or fail the tdb_open call. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. \end_layout \begin_layout Subsection tdb_set_max_dead/TDB_VOLATILE Expose Implementation \end_layout \begin_layout Standard In response to scalability issues with the free list ( \begin_inset CommandInset ref LatexCommand ref reference "TDB-Freelist-Is" \end_inset ) two API workarounds have been incorporated in TDB: tdb_set_max_dead() and the TDB_VOLATILE flag to tdb_open. The latter actually calls the former with an argument of \begin_inset Quotes eld \end_inset 5 \begin_inset Quotes erd \end_inset . \end_layout \begin_layout Standard This code allows deleted records to accumulate without putting them in the free list. On delete we iterate through each chain and free them in a batch if there are more than max_dead entries. These are never otherwise recycled except as a side-effect of a tdb_repack. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard With the scalability problems of the freelist solved, this API can be removed. The TDB_VOLATILE flag may still be useful as a hint that store and delete of records will be at least as common as fetch in order to allow some internal tuning, but initially will become a no-op. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. Unknown flags cause tdb_open() to fail as well, so they can be detected at runtime. 
\end_layout \begin_layout Subsection \begin_inset CommandInset label LatexCommand label name "TDB-Files-Cannot" \end_inset TDB Files Cannot Be Opened Multiple Times In The Same Process \end_layout \begin_layout Standard No process can open the same TDB twice; we check and disallow it. This is an unfortunate side-effect of fcntl locks, which operate on a per-file rather than per-file-descriptor basis, and do not nest. Thus, closing any file descriptor on a file clears all the locks obtained by this process, even if they were placed using a different file descriptor! \end_layout \begin_layout Standard Note that even if this were solved, deadlock could occur if operations were nested: this is a more manageable programming error in most cases. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard We could lobby POSIX to fix the perverse rules, or at least lobby Linux to violate them so that the most common implementation does not have this restriction. This would be a generally good idea for other fcntl lock users. \end_layout \begin_layout Standard Samba uses a wrapper which hands out the same tdb_context to multiple callers if this happens, and does simple reference counting. We should do this inside the tdb library, which already emulates lock nesting internally; it would need to recognize when deadlock occurs within a single process. This would create a new failure mode for tdb operations (while we currently handle locking failures, they are impossible in normal use and a process encountering them can do little but give up). \end_layout \begin_layout Standard I do not see benefit in an additional tdb_open flag to indicate whether re-opening is allowed, as though there may be some benefit to adding a call to detect when a tdb_context is shared, to allow other to create such an API. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. 
\end_layout \begin_layout Subsection TDB API Is Not POSIX Thread-safe \end_layout \begin_layout Standard The TDB API uses an error code which can be queried after an operation to determine what went wrong. This programming model does not work with threads, unless specific additional guarantees are given by the implementation. In addition, even otherwise-independent threads cannot open the same TDB (as in \begin_inset CommandInset ref LatexCommand ref reference "TDB-Files-Cannot" \end_inset ). \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard Rearchitecting the API to include a tdb_errcode pointer would be a great deal of churn, but fortunately most functions return 0 on success and -1 on error: we can change these to return 0 on success and a negative error code on error, and the API remains similar to previous. The tdb_fetch, tdb_firstkey and tdb_nextkey functions need to take a TDB_DATA pointer and return an error code. It is also simpler to have tdb_nextkey replace its key argument in place, freeing up any old .dptr. \end_layout \begin_layout Standard Internal locking is required to make sure that fcntl locks do not overlap between threads, and also that the global list of tdbs is maintained. \end_layout \begin_layout Standard The aim is that building tdb with -DTDB_PTHREAD will result in a pthread-safe version of the library, and otherwise no overhead will exist. Alternatively, a hooking mechanism similar to that proposed for \begin_inset CommandInset ref LatexCommand ref reference "Proposed-Solution-locking-hook" \end_inset could be used to enable pthread locking at runtime. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Incomplete; API has been changed but thread safety has not been implemented. 
\end_layout \begin_layout Subsection *_nonblock Functions And *_mark Functions Expose Implementation \end_layout \begin_layout Standard CTDB \begin_inset Foot status collapsed \begin_layout Plain Layout Clustered TDB, see http://ctdb.samba.org \end_layout \end_inset wishes to operate on TDB in a non-blocking manner. This is currently done as follows: \end_layout \begin_layout Enumerate Call the _nonblock variant of an API function (eg. tdb_lockall_nonblock). If this fails: \end_layout \begin_layout Enumerate Fork a child process, and wait for it to call the normal variant (eg. tdb_lockall). \end_layout \begin_layout Enumerate If the child succeeds, call the _mark variant to indicate we already have the locks (eg. tdb_lockall_mark). \end_layout \begin_layout Enumerate Upon completion, tell the child to release the locks (eg. tdb_unlockall). \end_layout \begin_layout Enumerate Indicate to tdb that it should consider the locks removed (eg. tdb_unlockall_mark). \end_layout \begin_layout Standard There are several issues with this approach. Firstly, adding two new variants of each function clutters the API for an obscure use, and so not all functions have three variants. Secondly, it assumes that all paths of the functions ask for the same locks, otherwise the parent process will have to get a lock which the child doesn't have under some circumstances. I don't believe this is currently the case, but it constrains the implementation. \end_layout \begin_layout Subsubsection \begin_inset CommandInset label LatexCommand label name "Proposed-Solution-locking-hook" \end_inset Proposed Solution \end_layout \begin_layout Standard Implement a hook for locking methods, so that the caller can control the calls to create and remove fcntl locks. In this scenario, ctdbd would operate as follows: \end_layout \begin_layout Enumerate Call the normal API function, eg tdb_lockall(). \end_layout \begin_layout Enumerate When the lock callback comes in, check if the child has the lock. 
Initially, this is always false. If so, return 0. Otherwise, try to obtain it in non-blocking mode. If that fails, return EWOULDBLOCK. \end_layout \begin_layout Enumerate Release locks in the unlock callback as normal. \end_layout \begin_layout Enumerate If tdb_lockall() fails, see if we recorded a lock failure; if so, call the child to repeat the operation. \end_layout \begin_layout Enumerate The child records what locks it obtains, and returns that information to the parent. \end_layout \begin_layout Enumerate When the child has succeeded, goto 1. \end_layout \begin_layout Standard This is flexible enough to handle any potential locking scenario, even when lock requirements change. It can be optimized so that the parent does not release locks, just tells the child which locks it doesn't need to obtain. \end_layout \begin_layout Standard It also keeps the complexity out of the API, and in ctdbd where it is needed. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. \end_layout \begin_layout Subsection tdb_chainlock Functions Expose Implementation \end_layout \begin_layout Standard tdb_chainlock locks some number of records, including the record indicated by the given key. This gave atomicity guarantees; no-one can start a transaction, alter, read or delete that key while the lock is held. \end_layout \begin_layout Standard It also makes the same guarantee for any other key in the chain, which is an internal implementation detail and potentially a cause for deadlock. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard None. It would be nice to have an explicit single entry lock which effected no other keys. Unfortunately, this won't work for an entry which doesn't exist. Thus while chainlock may be implemented more efficiently for the existing case, it will still have overlap issues with the non-existing case. 
So it is best to keep the current (lack of) guarantee about which records will be effected to avoid constraining our implementation. \end_layout \begin_layout Subsection Signal Handling is Not Race-Free \end_layout \begin_layout Standard The tdb_setalarm_sigptr() call allows the caller's signal handler to indicate that the tdb locking code should return with a failure, rather than trying again when a signal is received (and errno == EAGAIN). This is usually used to implement timeouts. \end_layout \begin_layout Standard Unfortunately, this does not work in the case where the signal is received before the tdb code enters the fcntl() call to place the lock: the code will sleep within the fcntl() code, unaware that the signal wants it to exit. In the case of long timeouts, this does not happen in practice. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard The locking hooks proposed in \begin_inset CommandInset ref LatexCommand ref reference "Proposed-Solution-locking-hook" \end_inset would allow the user to decide on whether to fail the lock acquisition on a signal. This allows the caller to choose their own compromise: they could narrow the race by checking immediately before the fcntl call. \begin_inset Foot status collapsed \begin_layout Plain Layout It may be possible to make this race-free in some implementations by having the signal handler alter the struct flock to make it invalid. This will cause the fcntl() lock call to fail with EINVAL if the signal occurs before the kernel is entered, otherwise EAGAIN. \end_layout \end_inset \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. 
\end_layout \begin_layout Subsection The API Uses Gratuitous Typedefs, Capitals \end_layout \begin_layout Standard typedefs are useful for providing source compatibility when types can differ across implementations, or arguably in the case of function pointer definitions which are hard for humans to parse. Otherwise it is simply obfuscation and pollutes the namespace. \end_layout \begin_layout Standard Capitalization is usually reserved for compile-time constants and macros. \end_layout \begin_layout Description TDB_CONTEXT There is no reason to use this over 'struct tdb_context'; the definition isn't visible to the API user anyway. \end_layout \begin_layout Description TDB_DATA There is no reason to use this over struct TDB_DATA; the struct needs to be understood by the API user. \end_layout \begin_layout Description struct \begin_inset space ~ \end_inset TDB_DATA This would normally be called 'struct tdb_data'. \end_layout \begin_layout Description enum \begin_inset space ~ \end_inset TDB_ERROR Similarly, this would normally be enum tdb_error. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard None. Introducing lower case variants would please pedants like myself, but if it were done the existing ones should be kept. There is little point forcing a purely cosmetic change upon tdb users. \end_layout \begin_layout Subsection \begin_inset CommandInset label LatexCommand label name "tdb_log_func-Doesnt-Take" \end_inset tdb_log_func Doesn't Take The Private Pointer \end_layout \begin_layout Standard For API compatibility reasons, the logging function needs to call tdb_get_logging_private() to retrieve the pointer registered by the tdb_open_ex for logging. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard It should simply take an extra argument, since we are prepared to break the API/ABI. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. 
\end_layout \begin_layout Subsection Various Callback Functions Are Not Typesafe \end_layout \begin_layout Standard The callback functions in tdb_set_logging_function (after \begin_inset CommandInset ref LatexCommand ref reference "tdb_log_func-Doesnt-Take" \end_inset is resolved), tdb_parse_record, tdb_traverse, tdb_traverse_read and tdb_check all take void * and must internally convert it to the argument type they were expecting. \end_layout \begin_layout Standard If this type changes, the compiler will not produce warnings on the callers, since it only sees void *. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard With careful use of macros, we can create callback functions which give a warning when used on gcc and the types of the callback and its private argument differ. Unsupported compilers will not give a warning, which is no worse than now. In addition, the callbacks become clearer, as they need not use void * for their parameter. \end_layout \begin_layout Standard See CCAN's typesafe_cb module at http://ccan.ozlabs.org/info/typesafe_cb.html \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. \end_layout \begin_layout Subsection TDB_CLEAR_IF_FIRST Must Be Specified On All Opens, tdb_reopen_all Problematic \end_layout \begin_layout Standard The TDB_CLEAR_IF_FIRST flag to tdb_open indicates that the TDB file should be cleared if the caller discovers it is the only process with the TDB open. However, if any caller does not specify TDB_CLEAR_IF_FIRST it will not be detected, so will have the TDB erased underneath them (usually resulting in a crash). \end_layout \begin_layout Standard There is a similar issue on fork(); if the parent exits (or otherwise closes the tdb) before the child calls tdb_reopen_all() to establish the lock used to indicate the TDB is opened by someone, a TDB_CLEAR_IF_FIRST opener at that moment will believe it alone has opened the TDB and will erase it. 
\end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard Remove TDB_CLEAR_IF_FIRST. Other workarounds are possible, but see \begin_inset CommandInset ref LatexCommand ref reference "TDB_CLEAR_IF_FIRST-Imposes-Performance" \end_inset . \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. An open hook is provided to replicate this functionality if required. \end_layout \begin_layout Subsection Extending The Header Is Difficult \end_layout \begin_layout Standard We have reserved (zeroed) words in the TDB header, which can be used for future features. If the future features are compulsory, the version number must be updated to prevent old code from accessing the database. But if the future feature is optional, we have no way of telling if older code is accessing the database or not. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard The header should contain a \begin_inset Quotes eld \end_inset format variant \begin_inset Quotes erd \end_inset value (64-bit). This is divided into two 32-bit parts: \end_layout \begin_layout Enumerate The lower part reflects the format variant understood by code accessing the database. \end_layout \begin_layout Enumerate The upper part reflects the format variant you must understand to write to the database (otherwise you can only open for reading). \end_layout \begin_layout Standard The latter field can only be written at creation time, the former should be written under the OPEN_LOCK when opening the database for writing, if the variant of the code is lower than the current lowest variant. \end_layout \begin_layout Standard This should allow backwards-compatible features to be added, and detection if older code (which doesn't understand the feature) writes to the database. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. 
\end_layout \begin_layout Subsection Record Headers Are Not Expandable \end_layout \begin_layout Standard If we later want to add (say) checksums on keys and data, it would require another format change, which we'd like to avoid. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard We often have extra padding at the tail of a record. If we ensure that the first byte (if any) of this padding is zero, we will have a way for future changes to detect code which doesn't understand a new format: the new code would write (say) a 1 at the tail, and thus if there is no tail or the first byte is 0, we would know the extension is not present on that record. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. \end_layout \begin_layout Subsection TDB Does Not Use Talloc \end_layout \begin_layout Standard Many users of TDB (particularly Samba) use the talloc allocator, and thus have to wrap TDB in a talloc context to use it conveniently. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard The allocation within TDB is not complicated enough to justify the use of talloc, and I am reluctant to force another (excellent) library on TDB users. Nonetheless a compromise is possible. An attribute (see \begin_inset CommandInset ref LatexCommand ref reference "attributes" \end_inset ) can be added later to tdb_open() to provide an alternate allocation mechanism, specifically for talloc but usable by any other allocator (which would ignore the \begin_inset Quotes eld \end_inset context \begin_inset Quotes erd \end_inset argument). \end_layout \begin_layout Standard This would form a talloc hierarchy as expected, but the caller would still have to attach a destructor to the tdb context returned from tdb_open to close it. All TDB_DATA fields would be children of the tdb_context, and the caller would still have to manage them (using talloc_free() or talloc_steal()). 
\end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete, using the NTDB_ATTRIBUTE_ALLOCATOR attribute. \end_layout \begin_layout Section Performance And Scalability Issues \end_layout \begin_layout Subsection \begin_inset CommandInset label LatexCommand label name "TDB_CLEAR_IF_FIRST-Imposes-Performance" \end_inset TDB_CLEAR_IF_FIRST Imposes Performance Penalty \end_layout \begin_layout Standard When TDB_CLEAR_IF_FIRST is specified, a 1-byte read lock is placed at offset 4 (aka. the ACTIVE_LOCK). While these locks never conflict in normal tdb usage, they do add substantial overhead for most fcntl lock implementations when the kernel scans to detect if a lock conflict exists. This is often a single linked list, making the time to acquire and release a fcntl lock O(N) where N is the number of processes with the TDB open, not the number actually doing work. \end_layout \begin_layout Standard In a Samba server it is common to have huge numbers of clients sitting idle, and thus they have weaned themselves off the TDB_CLEAR_IF_FIRST flag. \begin_inset Foot status collapsed \begin_layout Plain Layout There is a flag to tdb_reopen_all() which is used for this optimization: if the parent process will outlive the child, the child does not need the ACTIVE_LOCK. This is a workaround for this very performance issue. \end_layout \end_inset \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard Remove the flag. It was a neat idea, but even trivial servers tend to know when they are initializing for the first time and can simply unlink the old tdb at that point. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. \end_layout \begin_layout Subsection TDB Files Have a 4G Limit \end_layout \begin_layout Standard This seems to be becoming an issue (so much for \begin_inset Quotes eld \end_inset trivial \begin_inset Quotes erd \end_inset !), particularly for ldb. 
\end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard A new, incompatible TDB format which uses 64 bit offsets internally rather than 32 bit as now. For simplicity of endian conversion (which TDB does on the fly if required), all values will be 64 bit on disk. In practice, some upper bits may be used for other purposes, but at least 56 bits will be available for file offsets. \end_layout \begin_layout Standard tdb_open() will automatically detect the old version, and even create them if TDB_VERSION6 is specified to tdb_open. \end_layout \begin_layout Standard 32 bit processes will still be able to access TDBs larger than 4G (assuming that their off_t allows them to seek to 64 bits), they will gracefully fall back as they fail to mmap. This can happen already with large TDBs. \end_layout \begin_layout Standard Old versions of tdb will fail to open the new TDB files (since 28 August 2009, commit 398d0c29290: prior to that any unrecognized file format would be erased and initialized as a fresh tdb!) \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. \end_layout \begin_layout Subsection TDB Records Have a 4G Limit \end_layout \begin_layout Standard This has not been a reported problem, and the API uses size_t which can be 64 bit on 64 bit platforms. However, other limits may have made such an issue moot. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard Record sizes will be 64 bit, with an error returned on 32 bit platforms which try to access such records (the current implementation would return TDB_ERR_OOM in a similar case). It seems unlikely that 32 bit keys will be a limitation, so the implementation may not support this (see \begin_inset CommandInset ref LatexCommand ref reference "sub:Records-Incur-A" \end_inset ). \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. 
\end_layout \begin_layout Subsection Hash Size Is Determined At TDB Creation Time \end_layout \begin_layout Standard TDB contains a number of hash chains in the header; the number is specified at creation time, and defaults to 131. This is such a bottleneck on large databases (as each hash chain gets quite long), that LDB uses 10,000 for this hash. In general it is impossible to know what the 'right' answer is at database creation time. \end_layout \begin_layout Subsubsection \begin_inset CommandInset label LatexCommand label name "sub:Hash-Size-Solution" \end_inset Proposed Solution \end_layout \begin_layout Standard After comprehensive performance testing on various scalable hash variants \begin_inset Foot status collapsed \begin_layout Plain Layout http://rusty.ozlabs.org/?p=89 and http://rusty.ozlabs.org/?p=94 This was annoying because I was previously convinced that an expanding tree of hashes would be very close to optimal. \end_layout \end_inset , it became clear that it is hard to beat a straight linear hash table which doubles in size when it reaches saturation. Unfortunately, altering the hash table introduces serious locking complications : the entire hash table needs to be locked to enlarge the hash table, and others might be holding locks. Particularly insidious are insertions done under tdb_chainlock. \end_layout \begin_layout Standard Thus an expanding layered hash will be used: an array of hash groups, with each hash group exploding into pointers to lower hash groups once it fills, turning into a hash tree. This has implications for locking: we must lock the entire group in case we need to expand it, yet we don't know how deep the tree is at that point. \end_layout \begin_layout Standard Note that bits from the hash table entries should be stolen to hold more hash bits to reduce the penalty of collisions. We can use the otherwise-unused lower 3 bits. 
If we limit the size of the database to 64 exabytes, we can use the top 8 bits of the hash entry as well. These 11 bits would reduce false positives down to 1 in 2000 which is more than we need: we can use one of the bits to indicate that the extra hash bits are valid. This means we can choose not to re-hash all entries when we expand a hash group; simply use the next bits we need and mark them invalid. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Ignore. Scaling the hash automatically proved inefficient at small hash sizes; we default to an 8192-element hash (changeable via NTDB_ATTRIBUTE_HASHSIZE), and when buckets clash we expand to an array of hash entries. This scales slightly better than the tdb chain (due to the 8 top bits containing extra hash). \end_layout \begin_layout Subsection \begin_inset CommandInset label LatexCommand label name "TDB-Freelist-Is" \end_inset TDB Freelist Is Highly Contended \end_layout \begin_layout Standard TDB uses a single linked list for the free list. Allocation occurs as follows, using heuristics which have evolved over time: \end_layout \begin_layout Enumerate Get the free list lock for this whole operation. \end_layout \begin_layout Enumerate Multiply length by 1.25, so we always over-allocate by 25%. \end_layout \begin_layout Enumerate Set the slack multiplier to 1. \end_layout \begin_layout Enumerate Examine the current freelist entry: if it is > length but < the current best case, remember it as the best case. \end_layout \begin_layout Enumerate Multiply the slack multiplier by 1.05. \end_layout \begin_layout Enumerate If our best fit so far is less than length * slack multiplier, return it. The slack will be turned into a new free record if it's large enough. \end_layout \begin_layout Enumerate Otherwise, go onto the next freelist entry. 
\end_layout \begin_layout Standard Deleting a record occurs as follows: \end_layout \begin_layout Enumerate Lock the hash chain for this whole operation. \end_layout \begin_layout Enumerate Walk the chain to find the record, keeping the prev pointer offset. \end_layout \begin_layout Enumerate If max_dead is non-zero: \end_layout \begin_deeper \begin_layout Enumerate Walk the hash chain again and count the dead records. \end_layout \begin_layout Enumerate If it's more than max_dead, bulk free all the dead ones (similar to steps 4 and below, but the lock is only obtained once). \end_layout \begin_layout Enumerate Simply mark this record as dead and return. \end_layout \end_deeper \begin_layout Enumerate Get the free list lock for the remainder of this operation. \end_layout \begin_layout Enumerate \begin_inset CommandInset label LatexCommand label name "right-merging" \end_inset Examine the following block to see if it is free; if so, enlarge the current block and remove that block from the free list. This was disabled, as removal from the free list was O(entries-in-free-list). \end_layout \begin_layout Enumerate Examine the preceding block to see if it is free: for this reason, each block has a 32-bit tailer which indicates its length. If it is free, expand it to cover our new block and return. \end_layout \begin_layout Enumerate Otherwise, prepend ourselves to the free list. \end_layout \begin_layout Standard Disabling right-merging (step \begin_inset CommandInset ref LatexCommand ref reference "right-merging" \end_inset ) causes fragmentation; the other heuristics proved insufficient to address this, so the final answer to this was that when we expand the TDB file inside a transaction commit, we repack the entire tdb. \end_layout \begin_layout Standard The single list lock limits our allocation rate; due to the other issues this is not currently seen as a bottleneck. 
\end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard The first step is to remove all the current heuristics, as they obviously interact, then examine them once the lock contention is addressed. \end_layout \begin_layout Standard The free list must be split to reduce contention. Assuming perfect free merging, we can at most have 1 free list entry for each entry. This implies that the number of free lists is related to the size of the hash table, but as it is rare to walk a large number of free list entries we can use far fewer, say 1/32 of the number of hash buckets. \end_layout \begin_layout Standard It seems tempting to try to reuse the hash implementation which we use for records here, but we have two ways of searching for free entries: for allocation we search by size (and possibly zone) which produces too many clashes for our hash table to handle well, and for coalescing we search by address. Thus an array of doubly-linked free lists seems preferable. \end_layout \begin_layout Standard There are various benefits in using per-size free lists (see \begin_inset CommandInset ref LatexCommand ref reference "sub:TDB-Becomes-Fragmented" \end_inset ) but it's not clear this would reduce contention in the common case where all processes are allocating/freeing the same size. Thus we almost certainly need to divide in other ways: the most obvious is to divide the file into zones, and using a free list (or table of free lists) for each. This approximates address ordering. \end_layout \begin_layout Standard Unfortunately it is difficult to know what heuristics should be used to determine zone sizes, and our transaction code relies on being able to create a \begin_inset Quotes eld \end_inset recovery area \begin_inset Quotes erd \end_inset by simply appending to the file (difficult if it would need to create a new zone header). 
Thus we use a linked-list of free tables; currently we only ever create one, but if there is more than one we choose one at random to use. In future we may use heuristics to add new free tables on contention. We only expand the file when all free tables are exhausted. \end_layout \begin_layout Standard The basic algorithm is as follows. Freeing is simple: \end_layout \begin_layout Enumerate Identify the correct free list. \end_layout \begin_layout Enumerate Lock the corresponding list. \end_layout \begin_layout Enumerate Re-check the list (we didn't have a lock, sizes could have changed): relock if necessary. \end_layout \begin_layout Enumerate Place the freed entry in the list. \end_layout \begin_layout Standard Allocation is a little more complicated, as we perform delayed coalescing at this point: \end_layout \begin_layout Enumerate Pick a free table; usually the previous one. \end_layout \begin_layout Enumerate Lock the corresponding list. \end_layout \begin_layout Enumerate If the top entry is large enough, remove it from the list and return it. \end_layout \begin_layout Enumerate Otherwise, coalesce entries in the list. If there was no entry large enough, unlock the list and try the next largest list. \end_layout \begin_layout Enumerate If no list has an entry which meets our needs, try the next free table. \end_layout \begin_layout Enumerate If no zone satisfies, expand the file. \end_layout \begin_layout Standard This optimizes rapid insert/delete of free list entries by not coalescing them all the time. First-fit address ordering seems to be fairly good for keeping fragmentation low (see \begin_inset CommandInset ref LatexCommand ref reference "sub:TDB-Becomes-Fragmented" \end_inset ). Note that address ordering does not need a tailer to coalesce, though if we needed one we could have one cheaply: see \begin_inset CommandInset ref LatexCommand ref reference "sub:Records-Incur-A" \end_inset . 
\end_layout \begin_layout Standard Each free entry has the free table number in the header: less than 255. It also contains a doubly-linked list for easy deletion. \end_layout \begin_layout Subsection \begin_inset CommandInset label LatexCommand label name "sub:TDB-Becomes-Fragmented" \end_inset TDB Becomes Fragmented \end_layout \begin_layout Standard Much of this is a result of allocation strategy \begin_inset Foot status collapsed \begin_layout Plain Layout The Memory Fragmentation Problem: Solved? Johnstone & Wilson 1995 ftp://ftp.cs.utexas.edu/pub/garbage/malloc/ismm98.ps \end_layout \end_inset and deliberate hobbling of coalescing; internal fragmentation (aka overallocation) is deliberately set at 25%, and external fragmentation is only cured by the decision to repack the entire db when a transaction commit needs to enlarge the file. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard The 25% overhead on allocation works in practice for ldb because indexes tend to expand by one record at a time. This internal fragmentation can be resolved by having an \begin_inset Quotes eld \end_inset expanded \begin_inset Quotes erd \end_inset bit in the header to note entries that have previously expanded, and allocating more space for them. \end_layout \begin_layout Standard There is a spectrum of possible solutions for external fragmentation: one is to use a fragmentation-avoiding allocation strategy such as best-fit address-order allocator. The other end of the spectrum would be to use a bump allocator (very fast and simple) and simply repack the file when we reach the end. \end_layout \begin_layout Standard There are three problems with efficient fragmentation-avoiding allocators: they are non-trivial, they tend to use a single free list for each size, and there's no evidence that tdb allocation patterns will match those recorded for general allocators (though it seems likely). 
\end_layout \begin_layout Standard Thus we don't spend too much effort on external fragmentation; we will be no worse than the current code if we need to repack on occasion. More effort is spent on reducing freelist contention, and reducing overhead. \end_layout \begin_layout Subsection \begin_inset CommandInset label LatexCommand label name "sub:Records-Incur-A" \end_inset Records Incur A 28-Byte Overhead \end_layout \begin_layout Standard Each TDB record has a header as follows: \end_layout \begin_layout LyX-Code struct tdb_record { \end_layout \begin_layout LyX-Code tdb_off_t next; /* offset of the next record in the list */ \end_layout \begin_layout LyX-Code tdb_len_t rec_len; /* total byte length of record */ \end_layout \begin_layout LyX-Code tdb_len_t key_len; /* byte length of key */ \end_layout \begin_layout LyX-Code tdb_len_t data_len; /* byte length of data */ \end_layout \begin_layout LyX-Code uint32_t full_hash; /* the full 32 bit hash of the key */ \end_layout \begin_layout LyX-Code uint32_t magic; /* try to catch errors */ \end_layout \begin_layout LyX-Code /* the following union is implied: \end_layout \begin_layout LyX-Code union { \end_layout \begin_layout LyX-Code char record[rec_len]; \end_layout \begin_layout LyX-Code struct { \end_layout \begin_layout LyX-Code char key[key_len]; \end_layout \begin_layout LyX-Code char data[data_len]; \end_layout \begin_layout LyX-Code } \end_layout \begin_layout LyX-Code uint32_t totalsize; (tailer) \end_layout \begin_layout LyX-Code } \end_layout \begin_layout LyX-Code */ \end_layout \begin_layout LyX-Code }; \end_layout \begin_layout Standard Naively, this would double to a 56-byte overhead on a 64 bit implementation. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard We can use various techniques to reduce this for an allocated block: \end_layout \begin_layout Enumerate The 'next' pointer is not required, as we are using a flat hash table. 
\end_layout \begin_layout Enumerate 'rec_len' can instead be expressed as an addition to key_len and data_len (it accounts for wasted or overallocated length in the record). Since the record length is always a multiple of 8, we can conveniently fit it in 32 bits (representing up to 35 bits). \end_layout \begin_layout Enumerate 'key_len' and 'data_len' can be reduced. I'm unwilling to restrict 'data_len' to 32 bits, but instead we can combine the two into one 64-bit field and using a 5 bit value which indicates at what bit to divide the two. Keys are unlikely to scale as fast as data, so I'm assuming a maximum key size of 32 bits. \end_layout \begin_layout Enumerate 'full_hash' is used to avoid a memcmp on the \begin_inset Quotes eld \end_inset miss \begin_inset Quotes erd \end_inset case, but this is diminishing returns after a handful of bits (at 10 bits, it reduces 99.9% of false memcmp). As an aside, as the lower bits are already incorporated in the hash table resolution, the upper bits should be used here. Note that it's not clear that these bits will be a win, given the extra bits in the hash table itself (see \begin_inset CommandInset ref LatexCommand ref reference "sub:Hash-Size-Solution" \end_inset ). \end_layout \begin_layout Enumerate 'magic' does not need to be enlarged: it currently reflects one of 5 values (used, free, dead, recovery, and unused_recovery). It is useful for quick sanity checking however, and should not be eliminated. \end_layout \begin_layout Enumerate 'tailer' is only used to coalesce free blocks (so a block to the right can find the header to check if this block is free). This can be replaced by a single 'free' bit in the header of the following block (and the tailer only exists in free blocks). \begin_inset Foot status collapsed \begin_layout Plain Layout This technique from Thomas Standish. Data Structure Techniques. Addison-Wesley, Reading, Massachusetts, 1980. 
\end_layout \end_inset The current proposed coalescing algorithm doesn't need this, however. \end_layout \begin_layout Standard This produces a 16 byte used header like this: \end_layout \begin_layout LyX-Code struct tdb_used_record { \end_layout \begin_layout LyX-Code uint32_t used_magic : 16, \end_layout \begin_layout LyX-Code \end_layout \begin_layout LyX-Code key_data_divide: 5, \end_layout \begin_layout LyX-Code top_hash: 11; \end_layout \begin_layout LyX-Code uint32_t extra_octets; \end_layout \begin_layout LyX-Code uint64_t key_and_data_len; \end_layout \begin_layout LyX-Code }; \end_layout \begin_layout Standard And a free record like this: \end_layout \begin_layout LyX-Code struct tdb_free_record { \end_layout \begin_layout LyX-Code uint64_t free_magic: 8, \end_layout \begin_layout LyX-Code prev : 56; \end_layout \begin_layout LyX-Code \end_layout \begin_layout LyX-Code uint64_t free_table: 8, \end_layout \begin_layout LyX-Code total_length : 56; \end_layout \begin_layout LyX-Code uint64_t next; \end_layout \begin_layout LyX-Code }; \end_layout \begin_layout Standard Note that by limiting valid offsets to 56 bits, we can pack everything we need into 3 64-bit words, meaning our minimum record size is 8 bytes. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. 
\end_layout \begin_layout Subsection Transaction Commit Requires 4 fdatasync \end_layout \begin_layout Standard The current transaction algorithm is: \end_layout \begin_layout Enumerate write_recovery_data(); \end_layout \begin_layout Enumerate sync(); \end_layout \begin_layout Enumerate write_recovery_header(); \end_layout \begin_layout Enumerate sync(); \end_layout \begin_layout Enumerate overwrite_with_new_data(); \end_layout \begin_layout Enumerate sync(); \end_layout \begin_layout Enumerate remove_recovery_header(); \end_layout \begin_layout Enumerate sync(); \end_layout \begin_layout Standard On current ext3, each sync flushes all data to disk, so the next 3 syncs are relatively quick. But this could become a performance bottleneck on other filesystems such as ext4. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard Neil Brown points out that this is overzealous, and only one sync is needed: \end_layout \begin_layout Enumerate Bundle the recovery data, a transaction counter and a strong checksum of the new data. \end_layout \begin_layout Enumerate Strong checksum that whole bundle. \end_layout \begin_layout Enumerate Store the bundle in the database. \end_layout \begin_layout Enumerate Overwrite the oldest of the two recovery pointers in the header (identified using the transaction counter) with the offset of this bundle. \end_layout \begin_layout Enumerate sync. \end_layout \begin_layout Enumerate Write the new data to the file. \end_layout \begin_layout Standard Checking for recovery means identifying the latest bundle with a valid checksum and using the new data checksum to ensure that it has been applied. This is more expensive than the current check, but need only be done at open. For running databases, a separate header field can be used to indicate a transaction in progress; we need only check for recovery if this is set. 
\end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Deferred. \end_layout \begin_layout Subsection \begin_inset CommandInset label LatexCommand label name "sub:TDB-Does-Not" \end_inset TDB Does Not Have Snapshot Support \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard None. At some point you say \begin_inset Quotes eld \end_inset use a real database \begin_inset Quotes erd \end_inset (but see \begin_inset CommandInset ref LatexCommand ref reference "replay-attribute" \end_inset ). \end_layout \begin_layout Standard But as a thought experiment, if we implemented transactions to only overwrite free entries (this is tricky: there must not be a header in each entry which indicates whether it is free, but use of presence in metadata elsewhere), and a pointer to the hash table, we could create an entirely new commit without destroying existing data. Then it would be easy to implement snapshots in a similar way. \end_layout \begin_layout Standard This would not allow arbitrary changes to the database, such as tdb_repack does, and would require more space (since we have to preserve the current and future entries at once). If we used hash trees rather than one big hash table, we might only have to rewrite some sections of the hash, too. \end_layout \begin_layout Standard We could then implement snapshots using a similar method, using multiple different hash tables/free tables. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Deferred. \end_layout \begin_layout Subsection Transactions Cannot Operate in Parallel \end_layout \begin_layout Standard This would be useless for ldb, as it hits the index records with just about every update. It would add significant complexity in resolving clashes, and cause all transaction callers to write their code to loop in the case where the transactions spuriously failed. 
\end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard None (but see \begin_inset CommandInset ref LatexCommand ref reference "replay-attribute" \end_inset ). We could solve a small part of the problem by providing read-only transactions. These would allow one write transaction to begin, but it could not commit until all r/o transactions are done. This would require a new RO_TRANSACTION_LOCK, which would be upgraded on commit. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Deferred. \end_layout \begin_layout Subsection Default Hash Function Is Suboptimal \end_layout \begin_layout Standard The Knuth-inspired multiplicative hash used by tdb is fairly slow (especially if we expand it to 64 bits), and works best when the hash bucket size is a prime number (which also means a slow modulus). In addition, it is highly predictable which could potentially lead to a Denial of Service attack in some TDB uses. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard The Jenkins lookup3 hash \begin_inset Foot status open \begin_layout Plain Layout http://burtleburtle.net/bob/c/lookup3.c \end_layout \end_inset is a fast and superbly-mixing hash. It's used by the Linux kernel and almost everything else. This has the particular properties that it takes an initial seed, and produces two 32 bit hash numbers, which we can combine into a 64-bit hash. \end_layout \begin_layout Standard The seed should be created at tdb-creation time from some random source, and placed in the header. This is far from foolproof, but adds a little bit of protection against hash bombing. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. 
\end_layout \begin_layout Subsection \begin_inset CommandInset label LatexCommand label name "Reliable-Traversal-Adds" \end_inset Reliable Traversal Adds Complexity \end_layout \begin_layout Standard We lock a record during traversal iteration, and try to grab that lock in the delete code. If that grab on delete fails, we simply mark it deleted and continue onwards; traversal checks for this condition and does the delete when it moves off the record. \end_layout \begin_layout Standard If traversal terminates, the dead record may be left indefinitely. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard Remove reliability guarantees; see \begin_inset CommandInset ref LatexCommand ref reference "traverse-Proposed-Solution" \end_inset . \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Complete. \end_layout \begin_layout Subsection Fcntl Locking Adds Overhead \end_layout \begin_layout Standard Placing a fcntl lock means a system call, as does removing one. This is actually one reason why transactions can be faster (everything is locked once at transaction start). In the uncontended case, this overhead can theoretically be eliminated. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard None. \end_layout \begin_layout Standard We tried this before with spinlock support, in the early days of TDB, and it didn't make much difference except in manufactured benchmarks. \end_layout \begin_layout Standard We could use spinlocks (with futex kernel support under Linux), but it means that we lose automatic cleanup when a process dies with a lock. There is a method of auto-cleanup under Linux, but it's not supported by other operating systems. We could reintroduce a clear-if-first-style lock and sweep for dead futexes on open, but that wouldn't help the normal case of one concurrent opener dying. 
Increasingly elaborate repair schemes could be considered, but they require an ABI change (everyone must use them) anyway, so there's no need to do this at the same time as everything else. \end_layout \begin_layout Subsection Some Transactions Don't Require Durability \end_layout \begin_layout Standard Volker points out that gencache uses a CLEAR_IF_FIRST tdb for normal (fast) usage, and occasionally empties the results into a transactional TDB. This kind of usage prioritizes performance over durability: as long as we are consistent, data can be lost. \end_layout \begin_layout Standard This would be more neatly implemented inside tdb: a \begin_inset Quotes eld \end_inset soft \begin_inset Quotes erd \end_inset transaction commit (ie. syncless) which meant that data may be reverted on a crash. \end_layout \begin_layout Subsubsection Proposed Solution \end_layout \begin_layout Standard None. \end_layout \begin_layout Standard Unfortunately any transaction scheme which overwrites old data requires a sync before that overwrite to avoid the possibility of corruption. \end_layout \begin_layout Standard It seems possible to use a scheme similar to that described in \begin_inset CommandInset ref LatexCommand ref reference "sub:TDB-Does-Not" \end_inset , where transactions are committed without overwriting existing data, and an array of top-level pointers were available in the header. If the transaction is \begin_inset Quotes eld \end_inset soft \begin_inset Quotes erd \end_inset then we would not need a sync at all: existing processes would pick up the new hash table and free list and work with that. \end_layout \begin_layout Standard At some later point, a sync would allow recovery of the old data into the free lists (perhaps when the array of top-level pointers filled). On crash, tdb_open() would examine the array of top levels, and apply the transactions until it encountered an invalid checksum. 
\end_layout \begin_layout Subsection Tracing Is Fragile, Replay Is External \end_layout \begin_layout Standard The current TDB has compile-time-enabled tracing code, but it often breaks as it is not enabled by default. In a similar way, the ctdb code has an external wrapper which does replay tracing so it can coordinate cluster-wide transactions. \end_layout \begin_layout Subsubsection Proposed Solution \begin_inset CommandInset label LatexCommand label name "replay-attribute" \end_inset \end_layout \begin_layout Standard Tridge points out that an attribute can be later added to tdb_open (see \begin_inset CommandInset ref LatexCommand ref reference "attributes" \end_inset ) to provide replay/trace hooks, which could become the basis for this and future parallel transactions and snapshot support. \end_layout \begin_layout Subsubsection Status \end_layout \begin_layout Standard Deferred. \end_layout \end_body \end_document ntdb-1.0/doc/design.pdf000066400000000000000000005667071224151530700150370ustar00rootroot00000000000000%PDF-1.4 %ÐÔÅØ 3 0 obj << /Length 2170 /Filter /FlateDecode >> stream xÚ­ZKsã6¾ûWpO¡ªFâE‚Ù\ìI&ëT’ÚÍø’Jr DXbF">ìøß§ ÑÔ ÅD+ž6€þºûën€²£`DÁ·7Ñß<ïnÞ¾çIÀ&¸VÁÃcÀµf\1×L¦Ið¿„?>|}÷åb)¹ ²¹mŠMY”\î­*—yÑ|¤—ǪÞg-‹†žRÐsU´Í¿Á¿H†ÏE»%a×d¯h *ý–­Íò¥‹×\4~#)HŒ*¶ÕB¤áóB˜0«óÆ Í0PßâS…»b_´HŽŠa­ÃÙ=nEÝÛul³rcÙb©bL–Âë«í]QÛœÞVµÍ>.t:³Qr¨+25:µò©ÈÑ)t˜•DpuÀ„0mWbL‹­x¡Uà[UÛ§¢)ZB>ÑøÝàYMRÊìã5­aKUöx9 ÀXHͦ³´íe¼äW âã2~)K#ÈQ)Xª|VqáQÞ— ÉöF[y˜wëcfCÝè …Ì1*0Á™áé §”L1§d_{(xM&|{ƲÊz©êbS”Ùn÷â'ë¢mmI/+_€8¾-óÚ>Ÿä¨H’ÕeðPQØûÃÎöq•NF~Ä$°/os0 ó4 Yâ°i¡Šå n^šÖîi‚RG-òà’×g{?ºýï=íB;P¯ö˜\„«®%øX!ø3¥6,Þãk·k‹3¶³¹­]Š&žoe\„zéó¶ØY®\ÚY_8àÀ>Ãèà õír±‚A‡ƒ ¶4ùk¤£¯h ‘p‡¡t¹ oÕ#Í¿ƒµœ†-©sã¢\ïºÜ_Æ\ùpûÃÝ-û¤G7®<É‘"õšô0½ÿ[W“ðRx!OÓ”–n]¬Aʼn|Âdñžâ|ÓÕ‡*¦Ü`náºZQ3€Y çˆT»¶v=®Dµ®«D Úš"Rç¡{~È+ô"#ѓˮڗ)J$={8PþÝO»¾æ²x(uÀÝÞÖUç%¾/àÝ]ê8üݮۆ9qߺÁÑæÇs@è–¤ÔKo0È.@ǘ;Gñu… _hu\bº¬2×K?R%CW^®«ýÀÕ ø¥‰R—¡7/»Æ•Hñ 
qYªrµÍüŽT+°Éãâ¦Ú[ÀX[Õ›¬,Öx{@C¨7¸#3Ñrv@‡yC#‡K©ÓKaºRRWû‚\`xqc¯Ó5ɽ¥4w×—Ñtú±*]çvüÁÓ%CcÁ±}íÑÜ7pK¢*hÜè{µßÝXêëÂwklÉë%tÞñôŽû\ãôZiÛºól%Œ1sí¯‘”PÈAê.(Î4wðÂ3#ñ®jü„ëmðÄÄÝY dmý„o*h“×±õSűŸâ …cî?3èØq8~±‹2,Ä@'} Á#þÇÖ£>¸kV)>pÏʩԊñI$â’ƒïp+³´Á!uÇöä]À‰ãö5÷¡Æc#>&µ5Nœtù3ëA ®˜FAaS=Vñ.æà>›0xt‡R¹vá¤+T{櫟âëÎ@ZjD[’¾8²²úKw-þæáæÐQ€WÃ.‘$L ¬÷7¿ü9L‚uðma‚g·t#X’Æ0Þnþ7¦$ÖLÄé”’È^xôe3¼;Á÷ "†OŽ#ë?ãµì'o¾¡êÔøh`‰á³˜ ¢„%úzJÞ£1]¹&>>7 Úf±ÃHÆErjÇ÷þöÑ“-ÃwÄù»Ê}p¹û¼Þ»’ƒÔ–×çöB#a:2ÿ‡ÁãüÃÝùÊ’ ãZ%îúÓÙü÷‰ˆ11èR ôøµ ·¬óÉ7¬Š)£?•çmLøPd‹î“§z"].†g)¹ºçHÃôµ=çHµRß‹ÞùÏS=lOõ ¬:Oõ ˜=ÕCLžˆ)ª/†gI^[Ô"ÆŸ¿æaZ¤ÌD¢gšO0=lÏôVŠóLÏ€Ù3}â*\L&˜¾vœ¥ˆÇWR?|Åbª9Ül”é©TÏÛS=„•ú<Õ3`öTŸ¸ª Ÿ úbØq–âiÿ€êˆ š…jøŽRÝS-'¨ž¶§z;Eõ ˜=Õ'®j-&¨¾vœ%¥×QÍMÊðÏsPÍS@S¼§Z§zØ#Õ'°*:Kõ˜GªO]ÕF§úrØ1–Œ›¿Žê$†C‡ÏCµ&é©ÖTÏÛS=„•æ<Õ3`öTŸ¸«d‚ê‹aÇYЧuüªcˆ—J桾t#©zªã ªg€í©Âêóײ90{ª‡˜2ÑSU}1ì8Kp#¸’j-àЙç³ÿ€¿ÞÊ’ ¦g@í™ Æñy¢g€ì‰@*Ð0ÁóŨcqfRu%Ï2…gš•dR¼~R› ž¯Gíi¢&ü<Ï×Cö4!UOñ|)ê8CúZ–E‡M<Í2bi"{š'~$›¶çy›œÿ‘lÌžè!¦NøÄ/'—ÃŽ³ÄÍE?œþÛЉ‘d¸&ÊXÿ¹aø¿2àÇ_Í´:¤ endstream endobj 2 0 obj << /Type /Page /Contents 3 0 R /Resources 1 0 R /MediaBox [0 0 612 792] /Parent 10 0 R >> endobj 1 0 obj << /Font << /F17 4 0 R /F18 5 0 R /F29 6 0 R /F28 7 0 R /F31 8 0 R /F15 9 0 R >> /ProcSet [ /PDF /Text ] >> endobj 13 0 obj << /Length 1333 /Filter /FlateDecode >> stream xÚÅWKsã6 ¾çWø¶r&V¨§íÍôL³»étÚ®{j:Y¢mveR%©8n§ÿ½A9v¢lÒ\zˆ PˆÇaƒÕ€ >ž0¿^ÍNÎ?DÙ`Nó8Ì–ƒ(„ã(äÙ8Œ“l0«¿³µ0ÃQ’fæw‚o‘΃n¯ÄÖòMciÓ*Z˵Zy Yy•ªÒÜ¿[£ &Ú®9I|•jƒ­$VÓrÞVØõð÷ÙƒQ”†Ó4…5 §Yçë÷W –€Y° ÚQ€g&à‰æ…åDU·V(iHt»å0žkúŠÞ¡"  Cûä njµ@Çù¦3n©ÖúÄJŠ[–¤%dâ°õŽ>9r¾³c¯Ñ9#$ä)ÍÁUµij<ý@X4²»޲„AÂj’½¾ÃUß¡¯ðKš“ȲÐD`APޏu¶ø³—TKRµxæšSrŵ9”€àx© ’=a˜ `¤ Ð’Ò¨ÀÿË\Š»šoµ°.`d ¯Ò–%ÀbÙÖ$Ýhp7Ë‚?xiI¢_ÑioÍ.CçG~R–¿GýÔWn2 °$²¨‰©x)  €¸-æ¯0Ä8øÂº)öçG¥¬Ä´Þš\-ºÐ»á$ Î [,&¸>Võ8'·ug-šï\áÄwÈOYl¸iŠÒ6ŠÖ•Ölé:ˆÆïs2#8»C o¦’¢¬ˆw€žpCâtq¤¬ð½"(•m/”7þ4_R´eµ/ªæK‡(üêÒ ë-‹D K‡E€cj®w[p€Ÿ‘ÔRioT@ˆRXr¹v(8ÿDˆ$1Î2p,‰!Á19ƒ+cÁåç0“çÁ ÒŒD§'á$мf¡nœa)æªÁ4\Î9ôg2ÎÀ­€8"®ï˜uÅšØY?±˜¯IHè†+öÚ8u¶]æ±lyË2˜’1NR×ö H}kh›»£\ߣ åW7øÈWg |íÌA6³ˆ=´C¯3‚ðÁU5½ÆÁéZ†Þº0kßÅy°leéJíZ*éæ½#hUÍ¡2ŽOµZ=Òß+ëUÛa !˜ÅÁ%2U%¼‘ß•ƘÇ£´¦$±.‰ª­+b5ÿ³š{•µ'hÀZM€¯Ú½{Ì C\ 
¯Ò%{BÉŽi›Š†Žs[v8MŽñgádšìÁFpËXðyˆ÷ â-”qý `ÿâo¯o`kìKuèÕœ°‡¶‚<ƒ&Ä7¦™ºÅ3aT ã›™bNñ¦×bÑZnÞ£ûçD·B€éS1L96†(¡P‘G=—-Ü™o)g!‹FiûoWÞ˜Jþ䆟_Îf¿Ü\¡Æ¯³ëù?$¥ïhagGØÈŸÓûtùåÓ‘bDc¹ïÔ.z/<«[¼›qo—ù¢0/Fó4îÎHnõžÝJÆÞ£iûTò{{ÑSþö˜ÜcîÛ!=5à.'IAê‚ÈóSZCJÀAIºª¡‰›«£jŸž÷Ž'8œœ/[T‘%ɺÙï•7ˆR !ñ.á‹WÃ!MêpXþϹ{@üóÉC7çKÔi»ìÑÖ‹ésb ø¿åïYH?ÌåoÉÜ«ph`{ Þ þP{¤^LÌ£±>ê¡þA6‰ñåôž_¶¶uw~ÛÏjä#ÿ²^€(„—?¾²¦i÷ô’ôE,½Úþ0zpxÆ?¬cx’ýÅIÒ]þÓs%®A”OCxÁs- ëîÁ#™ëÙÉ¿:Pç’ endstream endobj 12 0 obj << /Type /Page /Contents 13 0 R /Resources 11 0 R /MediaBox [0 0 612 792] /Parent 10 0 R >> endobj 11 0 obj << /Font << /F15 9 0 R /F31 8 0 R /F32 14 0 R /F33 15 0 R /F34 16 0 R >> /ProcSet [ /PDF /Text ] >> endobj 19 0 obj << /Length 1732 /Filter /FlateDecode >> stream xڥɮÛ6ðž¯ð­2+©5·,M-Ä(P$EAKô3ñdÑ¥¾¸_ßYHy©ÒèIÃápöMÉêa•¬Þ?Kü÷õöÙ‹wR®ê¸.D±ÚîW©”qYT«"/c!óÕ¶]}ŽDœÆb½IÓ<‰>ë4RãäÖ¿ozñ.ͯ'«MZŲ*ùÙ{Î^}üð¹ó¿†ì¦Èëè-U†C´¹ËcÐ2KrŸY’EŽÆ–éÐß|Ç•ÄðUÎà‘s[«4>0Ö|E³Ò¹éxŸ¾ñSj'(ò컫`\×âŽ0¯!Òï¿h7²{aäÓà€ïv]gÑÕpõ[‚ÿ¾ã[œ¸ü·c·o_Ci¥ (éÀÞ7ô  }á@®‚tó” 4PIÄô qx7¢×ƒ¢àzÒ5OSîUDÝýŒ þüİëÈ\=0LEÆïàuË7súÙ¯Z4`˜X{s¼oiQDz(V›*3Ü\ÞÐü¸}ö7Ùô½Ô endstream endobj 18 0 obj << /Type /Page /Contents 19 0 R /Resources 17 0 R /MediaBox [0 0 612 792] /Parent 10 0 R >> endobj 17 0 obj << /Font << /F33 15 0 R /F15 9 0 R /F32 14 0 R >> /ProcSet [ /PDF /Text ] >> endobj 22 0 obj << /Length 1829 /Filter /FlateDecode >> stream xÚ­XKÛ6¾çWø(±"‰’,·§¤ym´b´‡¦Xp%zMD ‘ÊîþûÎp†²¼Q·Òƒ¡áð5Ïo†NV·«dõîYÂßWûg/Þ ±ÚÅ»2+WûÃ*"Þ–Õª,¶q&ŠÕ¾Yýe±ˆÓõ&M‹$ú¸N£ÁœÖ‘EƪˆJDŸL;:múõßû__¼M‹Ù‰Éj“V±¨¶tÖ;ýuU‘ê×›‡á6‹´£V©Î†]’™°ÿs’%­’Ömì8œm‘âpÍÍzƒ—%ÞùU›ö›ín´•í…öb¦ñ®(IÌÖàŽ;ÝßÂÊtõÊ:T6O«È ²·²}Á½%¦=š±mh1ßZ›Nƒ´¢Q9¶.^oÊ]½Ä;šF£ådÛ>¬Á’¨´¨HiØ@JÏdŸø#E^Efcy¬'†¶>næ(ŽVÙ³2¨?èyM–³Ÿ“"_º¤‘ìñÎmé÷ ªó ’j ’?‰íŸF–Ftê]mÉW¸C ‰—¯ˆ˜œtoÅLyàªû\¯xÍÁ[ÊåM Ô Zé(ڇ͒üæÆÖã€Ñ+ÀûÒª˜°çÍ`OVÄÕNœq,cûä¼åGûdU´íÓZåÔÏt%ˆ  „ȧ`ʼn£´D°Öv8œ™£>Hœ­H±,ˆ¼«j‚ÞΊ誯Í0¨ *À ï¥=õv½øìCÚǧ=|3¼ø5H_c”ý§® †#®¿V÷SmÄNœÓÖÒ˜L±C` ·d¸@\gèkO”‘µ><ð‰ôi4ØO¨,Û1¤"P‡³ztÚB@@&@hŠ´ŒnF<  )0CDšQ0"ŸHÕí_ˆEWŽÄ2 yÀ; >Ú£ZYN“5ŽHc€æ 0!±¤Ï“t¶L-¥ìÔ;GŠ(RÌjú˜ÊÖ>DÞ ,T¢5ÇšÃ$%-ü ˆ–†§dÈ“N‰apLG7¡ÓÊ‘Y‚Ç¿+Ÿòÿ­/ðºee ~‘»r%@׆bCêžWËÐDb~òno>¤‚FÔ-ZÑoÖb?€G%÷ 
ܲ˜Û*"Ì[h<‚æyùDÎàìÔ}¼ ÏoæV)ô ÷@·š°¸§9©s*à(fƒ[Ç| ìåiO”>ªiXÍ0Š}lHÝ>B» -ì¶íSa’Ãä`÷û`²`˜D1­r×¼¿Fg¾Ø¿~uýÇZ¤Ñï^"Rî¯>¼-@–7÷®)†+1®ðBª6"‰Ë¬D¼ËY ô†åNþKDo¦M3-®0JE‚áHƃVIË·!p ;Ê mxóæ!YG_ÝæÎ@¤¾À: Š©V[G†žˆ Š>¿zê[ ­ 2 3ľ¬ H¥ÁŒ}ci‚Ká3/j;ë«6P+Clü¦˜ÿ'â<ö̼´$”M¸kJ‘ãÀ踊· ϼòúBÈf¸Ø|ƒž­_pÔnô­÷Ðö"îslOº°š¼@™ õvìÎU S(ÀF–€C |‹PK™æJù¥ì{ªI~Q1axQiaÜ(L…†gpO RwÔõØá–±õý1òP^ã«! N£ãv%ñ年ö{GÇ‹¹Y’“b-‡ÆZŒ‚_zb‘lDœ ol%ß”†yw„P»=Ò@ÉÐÊâr¢=¼ã$Ãëvº›ž1iÔñéa}n$ׯ%¨Ö,GgP©ÐB •F˜êL`"÷ÌãH%>{yÐÊâû LeyûtNÞ4ôMĶ|ö€w×uK½ØãPV÷µ:q=¤¤°²ºQå»$„ùY°M‹÷ò' L}<?^Åíþ¤©¸Ê"ÐM{(z &Ô­›Ö?çq䵘ïA—¨ùLûuz'Š]xsá”G3$jòÙì•…ƒ¥†û±Í ÊÝ2Þ@ûœœVu2¨ÿ0r¹[Zz¾¦¡%=Œ-ÑÞ°@Òð¨'Ì&wŸx¢£8Äa–ókét ¡Bòl5{ÿâÀß_ÿW ³xGmºÎ°ýoêièQ¾p …6cm5{ÐÐú=xª¨†^ YéÆðè9?•Fiìx{´+Np΃^çg¦]³œ½Ù˜“÷óô'×*-w±(K¨ÏU¼Mwty~±æÍþÙ?¼¹K endstream endobj 21 0 obj << /Type /Page /Contents 22 0 R /Resources 20 0 R /MediaBox [0 0 612 792] /Parent 10 0 R >> endobj 20 0 obj << /Font << /F33 15 0 R /F15 9 0 R /F32 14 0 R >> /ProcSet [ /PDF /Text ] >> endobj 25 0 obj << /Length 1970 /Filter /FlateDecode >> stream xÚ•XÛŽã¸}Ÿ¯pÞd`åÕý’}Êl2AÉìã ²A@K´MŒ,vHi<¯OÝ([÷&yèV±X$‹ÅÃSE'›Ó&Ùüþ]"ß÷ûwßÈóM»k«¬Úì›4ÏwuÕlª²Þey¹Ù÷›¿EÙ®ÜeÛ8MË$ú9KwÚ?:»x'Øðè»4‹žq,ĉƒ¦£¦y´ë!Ê,xZ-hãÔG§n:ý´Ë  CÏóaPalÊÒÖª±g¡7^ ƒÅ®+k̰ªš vo;°–ñ«“”C@_ó´ŒæñhÝ„œG5iVzÓëXs8mVÙ#ŸõÀÍA‚€nñß¡²®gCг •p¸e~K‹W‘âfèaÅb°>kÇvÓÜ}ÇWã^ûΙçÉ⸠¬¼!§ò„ã†ÊÞòWP‚ö¸¢€s9ãææ0¦¬7ã‰ÍÀXì…‹§8á«Uiw¸â£¸/‹’.Ý •óÜ„ÓD¡`À f]î´‡I¾`q><ØÜc·R ¦Òè+±ÀÈ6æFéÖ0I8-s ª «Ì…G(bè aÅi ±BqÛ),!úM“p‹õ\-ÁyìLÌ’þº•‹„-r™úi£Ìk¸ -o¶ïqÓu ˪~€ß‡‘M;;=‹ÒÙÍn½ Á¶&Ø;Þäá.âGÛ@4éþ׈§"x Ñào"v±NBsQ£:iu¡‚³;9u¹0î@¡#dá$cí…z;嵤›¯óNVîš6_8w—Jºúq¸»<‹¬§3nòè³fÜè/ä0aÀ¿lÁ¼Íð†p3 ÑÁ±ùé§ÏOe#ÌK¨BD|c‘ Ž}á óÁ9¯¹ßÍà3H’¸{Ô!(p\?=\ó†8[¯ûÕØ¹ç€Á ì¿,('æeÀW†k‚„¤Î^ xºáÐ4˜Ì/‚z‚ ë{Æ%%˜’™ôg…–| Ãj ès€g:œd÷ÈÓý9@ˆ0qOzÆEÈØ÷;A¶t@)/Òd—ø¿äo`y1>2 u é¤G¤§ÙCçðê ÊÇø{x§!Õ¢ñAñ}kÏ’(®N=ˆæ.w@ˆ¿—!vžXàsÁK"¯¨(ê,;«¿;Ëât)Þ¼·ÄJWõ¦9³£ÈŸD©0/gè»?nì Üåôi±ÓÜ×…Á·RNYÔËm";Zª9MàÁŒ˜¥Š‚ÐXÌÁ)÷²mн-›U`Á@ Èñ…šNiÆK"}}Ôˆb(“4Ì‚ íFDÙè-– ÜOÄL›y¶G¤SƨbÅ Rwö4šiV_ÏT;Qâ_û‚³Á‹k<74¿×È6Š?˜ºðªÈÔ÷©N .J¹Y´îC¯@ `”¡LíqÖbåpå6Vܳ“΋% ôÒä»»+ˆ$³@/–òpF´¹ I  ©‰R+ÞÄVá½dU|ÞHÔâ Qj’~Ö@Ñ^£ e†ðÞP2"åÈßѺ‹P®"z‰ÐPÄ? 
üYWĨàÀkGØÁ ˜s±¿{UÖ‰³}ä3M!-f)+NæëöFvó3>Þ& '„e*—)&®’Èk͡ΫÝbb¥Ù*áÚªï žFÛ«'›Êƒ»m˜¦7CZ€iáV0w:~›‰–©)bˆ„Oør½Ê¬rðÍ ¤wö8ƒLUQ˜¹(DÎ ëÌaÜðö¢oêû(ÐV…ÝG:*UÅèÍg¢d+ºËKb*n\Ý®Îó¥<òx›!M•F «V*æ„+hš%¯3V°Xn*N8/¬‡cG¶üͧ§_*š²[Ñôÿ¼ñ›×oüÿå-\¯ÞÂð(ßXxòüýH ­‹PR¡nFú޽:êÿîОÒC›Ém.k€F2r›qhÚ[¹ ªNª}ŸAÚœŸ»¨¿UÐøç 7jJœó8…St<(ŒYS ë^a©¼aÉA5ŒH…_©<+ ý^åiÀ/ês–ÒjS§Ôër]PãÀ…Ÿn÷ÜÖž›Ä4e÷E–ƒÔò¸‚Ä“àg>¢b¿> endobj 23 0 obj << /Font << /F33 15 0 R /F15 9 0 R /F32 14 0 R >> /ProcSet [ /PDF /Text ] >> endobj 28 0 obj << /Length 2200 /Filter /FlateDecode >> stream xÚXK“Û8¾÷¯Ðmå©XÑû1{Ꙥw{OÙIï)3ÕEK´­jYòR:ýï/Ú’£ÌîÅA@üèÐ;x¡÷»P¾¿<ݽH¯ ª<ν§½%IP䥗gE'™÷Ôx_ü8(‚h³¢,ô?m"ß çÍ6‰ýÁêˆ2ñ?Ý4¶C¿ùãé_ï¢l¦1ô¶Q$eÁº~ÓªÞÄ¥lG]miåGÍÄý§G `£q`FÛ×ÝÔȬéf÷¬©AT¹É3†¶Gý£6Ì}ÝÄ…?LØÝ°æÉ-5ŒV#“VúA–GA•ålù°ßlÓ0÷Ù|ü™Lÿxqê撚'÷ƒ§^º{cÆi°njêkŒ‘Åaæ=Âzž ù3ÈÐNu­­åê^°–R€Áü¼°TÎëuãüKýZõBÙª?ÈDÜ™ø``äL;³P$äÏ Ã‹Y8 ³pâq¯jl¿Îv&ûdïùA%…$ñnÍv:Ï9/ ¼@Âè“j)ˆ0°í©í”Ù¿g£¿¶ÃdƒÍ¶„Ýžœ Ìš½kLˆ#\Å™ô{˜¤ÆŽ/d;[^+v¡x¯¿]eãjq°q•A$°*p BΨ^$.ÈUÌ|úðËódßo €žðçžg¾ÏcäRHp½;!f2c½bð%òŒ‚ñˆICUYþªÎLA(Ï Faërªq5@A@¦ÑçNÕ"ÑŽ¢û$ÚÈ`’  '$N@èu8ƒzþ’¢ÕœØ­¹_$‰?ù«Hdž®a"hΣ HI”Uš.+ùÑE¶‡JߦEæw&ª‘Ú¹$üýçÔ<Õ´H9Í€{RæÚÉhæGì$HíkÞhmQ݈²~5ÎrÀM§ÎäÃvá„„CºØˆ¢\ðºgïÇ#4³Æ¾Ã‘+T 5 P¯#J³Ô¡v dv­•ily$Öì,ËQÞk|ƒ¯nVB-VRñed}{B‚ƒŠ 6©ÝÔv øÍމ×r„ZÓ‘Wn?`Í|zúçoï?8™®cÊh;u£lÑËž?ŽÞ™ƒ´µjǹ$7¶¥þ ê÷ðåD×îŒ2oX©Ø=’BÚÊ‚ym­Höƒ°g‡y„í˜ËF#¥¿A¨¡2s(åûn»Úq0K]cí.›WÔxÓ<ö˜U°Õ F CFÙ 'íZ~kOÈŠæí2–v¹|Ι.vTF;²à>‹ƒo­ám˜t—èÊE91,À¼8k4š…ö© ­Œ|¼Ž%õä($÷¾+DJ]ÉG3qËhO¨1`´±À/`Jœe_‘K,ÈåókÔ8Ù¿)¥kõ€ÝpÔÇþòíçMìòyEþ\s8¼Þ® "Àç21Û¨`ßxN´•\þNméÔ–>õä“–~Õ8cS$v.AY^ÀZ‰Ç™ÿÓs¹ 8m×Q;#áç5ÇþÃRhvqÁ‘ÞãÁôŒ“Pä§ç“2?ýß°G˜,”Iäq L Ü\ ý 7ª Äíeå 'þ µ.Þf^TER¢l\ayIGâo´¢±€éô^¥ 6ï ½|-ãwà‹V€kÂéù‹{Ñ Cñâ¹¥`ÂÑÏS¦NªïµÁ[6© ëµ²—ûÖ“1rŒPÄÔ–’<ÈóbY:ÍÐkIyi¸ûKö„-_-Á½ |÷¢8ˆŠ T€ë±‹G°”ù²ÍÂÐÿUaωã˜Û”`ÐnQs8—@ÏÊÀiÓ*6™`CÄI0æ¢Â ýf¡>`$‚àÂR¨¥CŸž)Ýæ6PP ÿ4Dr¥@SfÌ:⣻”8È#…}¯»Îö²zžx5<ØWJvð@ñGÞ)ØñpÝ‘ý -8v CB°j%P{ĽH8AÔx{ Àœ”0W¢žüÏpæÎKĮޅâ·dV²ê7Å2“; ÇNz-•Œú5É<k¤M ù•Žwæ ¬©í›¶¦¬™qy§€ê°M¾ña']œ…Ü0ky Ø%ùW!ïµ4»FÍ%˜D _ Ýj⤫ü€u^rm@-¼“$Õ]·€ZÉ<­0yGeFwZY}#üù6õkÞýENˆKÙzNà1ÑÍS—`g®’Ì|†¤=22 $‡¢m´¹i7·ž ÞrÃõ¤›ôqoÑA0AoÎŽíÚó 
E`žàˉ̷š74„x+Ĥv¢Û¡BH9™’ƒÏ¬óÙð²—KQ\j-¼¯°,‚ù†±,ip^ÐëšÞ†¯<#¥î0?cF»† „ÃZ÷Ç Ž çÑ3ªÇzâÖ‹"u7ÅjN"åÿT€C)‘w#Î ;[O.,€ÛȇPÞ 0o݈Oh!¥3íïÀÀŠÉ‹ú…û¯$DduÛå,%2À3 ‰Ó`(S e"íW(8žßÞ¥{¼ ›lÖYGË< VäþO‚¹ÙãœU½0±_/ó:ügAˆ*üEmQ„çÞ ð£ÌeÙY¹kž'øÒ(|ùã$wèeo¢$ÿ`à÷ e¹âñÍ ¬×c{I1`íù?ë5׆õhÛÿmdÎÅnÄÀ™zª]$ípfÝšz:ÙQõàX)üètвÄ×®Õs…RF¥<QÛw`èæq핊rIÔô]¯ííhø¿ EÏ\€fË>÷ñéîÏ»ÈЋ.xFydEéÕ§»/„^“`VT¥÷J¢'-‚´Œî¼Ïwÿ–O /tZð¿§i”YÊBp&]Ô$XZa„‚iÀt P=Jîv“…Ê¥‡S–"}Ç”¥²â¸Á_Æ3üÏïß×ÐÈQdîT0˜ÃÂE÷½ÁÅQÇ€9ó2 “qëü68ÿ 2· endstream endobj 27 0 obj << /Type /Page /Contents 28 0 R /Resources 26 0 R /MediaBox [0 0 612 792] /Parent 10 0 R >> endobj 26 0 obj << /Font << /F33 15 0 R /F15 9 0 R /F32 14 0 R /F35 29 0 R /F37 30 0 R /F36 31 0 R >> /ProcSet [ /PDF /Text ] >> endobj 34 0 obj << /Length 1929 /Filter /FlateDecode >> stream xÚ¥XK“ã¶¾ï¯Ð-PÕHæ›T|Š7»©I\YW<.ìT CBCf)bB€ÖN~}úŠÔh'©ÊaD Ñh4º¿~`¢ÍÓ&Úüé]$ßïÞ}ó1M7‡ý¡HŠÍÃq§é¾,ªM‘—û$Í7Íæ•ì«}¼ÝÅq©¶±íóv—&Ê:ÓÀ JÕ¶Ÿ|g‡íßþüÍÇ8_HŒ6»¸Ú§UɲîOϽ9™a›TÊowI™(ŸXµ¾•²Ÿ™z´#z¤—ªÆ°«Tžxñd¼ìkÜ~3å,/ùV{–ì[äZ÷½™XëAˆ–•m6v8v8![¼Ý'Þò‚ïSF{ƒæÈ•ÐI¡Fs²(ó7ü1L;Ö|LÏÓW·rûí.Ëbu?0ƒo;Çb]m=vö§°Å7|cf<£;õ s£oðÔñ¢– lß“Zg÷{¼Ê ƒMœìãò·%âôx¿æùe—G‘zöiiÊVÂÁ`Ç“â~¸ÇA¦ŽÓP# î˜nždSóøžý%7Ï 4ñ¯QÁßõ™ÑZ¥ä¦J?·MU¢ zpÜꢢ÷õŠbOÆñ°#Uã”w´fÉÖWòÅkmG&z«ÝµËÓѱiŽEàú€`Pæe ¡sG¨”I%Ð’ë2²(Wá«{r¶ÆŸ¡uï QäudšC¬dI@ôÓ801¦O åxîœ!ðÉøÂ púÚG¯;ÙÔyüf*Ì;ìn…ÁãêÆÝ þ>IlwF 2x06 (ºë $h‹ã?£¤O?}ÿÇï¾ÿôþ/WžßÍöZÀ#½ ¿™Þh'X]ié˜Ö +@gj.ÁY`õ <@Ñn‰ÿ·‘›ÝT­q ¨@X¼‚mž/ƒƒ™gƒA®pUÎ"F(,žCÂÚhj;6”¡aE3±¿ 8æ[dHgI!<£æˆ‡½b Ãþ_…® •lÄî^dEš‚4ñ¦{bùM>À2]Åd™Èí¯nå k'kŒ{²-Ä%'ó2ÄHò³¤n8¢ã©êñ’}#’åÖÏz”Ò÷¿ºx+ß-I@…»~ÆpÐ馺6` 1ödƒo8ÙÒɯ’ÿŸ(QüÕ¯Qš#F œñ%ä°ÇÞ0—ìôÔò˜äÃŽ¹4Öj4¨œpk2Á OÖ‹]:*%Ѳì—!¡€Ðe-„ϸáwBŒ1Œ¤L‡ágûC–­VL0¤zêÍ GûLn3ÏŽ|ËТ eh¿€ ¤ÙÉóF{ ;  ã¸é°8À„ŽKËF éQÎèD\' "{n_W ñ.\(É÷Õ!¹4ĉ4Ä?z,æ~roô¾o{O7ó&œ“lØJ‰ð&循æ^ç€@û†Õ¡ÞB»Ý‚‘ÑU)v¦ðƒù»ÌÕÇí!UÆu\Ž©¾Ì=: ô¢‰á|CaÝëÿÒ·‹î¬…k±¬"qù:±ÍA{Å#Š²é„¿A#/ ?Ó¸éšZ\éðë~j8î‘ fäq74] mnÃ2¿/·¢w‘zê~C&J0eRŸC,‰Yµtð¥’ÍÚÛSWϸL‹X=Mzälæqß²¥»³ƒìáÄçõèy“fЇNK—|CiÝ{3Α–˜ÃWÆ\cIën*/D,g®æ*¼jRNòðmM߼Ɣ¾)Œ1ŸôçmÈ_QÚßD9}’ÑÊ@ÌuDôY|FÞ¨›S^î‚eH»«‹R 8¥•”Ò`9”/=H‘[ÞGRi'zCx¾tËÇi(ídlHpÂÃ}ÖòW…«–Ð2NNŒd·A:G®Òí­œ]g›Ãÿÿü–0þ+ 
¡^dìNlŽïG@ï#—$^ºZFž½FãV¯·%…¿æËsO!B3wy­÷’… õXñÕñª# íÚ¥<¡Pè=RSsœ£RöV¸r²tF¾¨Kè[ÂNÍOdŠðäÚeyxaã6U¸ËtüÌ3òð’Y+s¹q_´ÅÕuÁD¨eŽû½kZÊ‹T„¯†‡ eÝ…îéõƒ69Dê„•‡“íš›œ–·œçQ]Ó0ýdGá@»5ž!,½ÈὄƅXñ ”ŒI;ÇÍ 9>+Õ¹ã7CÉðxSªkðÅÙ0öú™‰såØr¦‡sËÎÀOÑ‹^HA½ð¡Y•"Æq¼[ÐMí ¶Ð|µxàC¹žÆqþÇðaÖÏoÂ,áö¤,¿Ö%ä¿’ó’4ë@- ÎÏm“À:¿_ˆ…쉣9:q² zÉ[9a6¨íš[Wª¡M€òÓ óCÝNã×RàÕ#$.û´(6»¦ ,W<Þýš;8Ÿ endstream endobj 33 0 obj << /Type /Page /Contents 34 0 R /Resources 32 0 R /MediaBox [0 0 612 792] /Parent 35 0 R >> endobj 32 0 obj << /Font << /F33 15 0 R /F15 9 0 R /F32 14 0 R >> /ProcSet [ /PDF /Text ] >> endobj 38 0 obj << /Length 2252 /Filter /FlateDecode >> stream xÚ­Ùnä6òÝ_¡7«iEulg23ë`áIÆÅI0 %¶›°ZêPÒx¼_ŸºØz]`ºY,‹UźÈ8x âàÃU,ã÷««oÞgi$Q­T¬ÖA’eQYTA¡Ê(ÍT°jƒ_Ã4JâÅ2IRÞÛÇ^w‹eVªðïº_$aÛÙþ‘väñn˜ø¤³|¿¨óгø}õã7ïÔQ]¤îË¤Š²ªä}V³Xæ*§öáóh&Ýi·ý<ÚÇÝä~‹U ¿ ’°’°ã‰îp2,Ò2|…ç„qׂEz„7ºoá“ÐÌÊö­m4p’D&!Ñ@ 9m4j—$¼I–¸Œé€ HÑ,Ò*|b³¶Al¶B=n†¹kù“3ÓìzÆ?ÛiÃX͈µ¶ÝìÌœ©ÐiØÏɆ ¼Èà^dŸ8ÔÚö'"Ëù=oLú•È9WÕ‘J:4iŒý‚’›–1hr0/1Îõã¿ûŽÇw7Hýáæö&bºÕÆ3ôã<Îp/~b„!˜ü‚¨v»ëÌÄÎhæ, '»5Ã<Ñ'yTçùé¢_úõহד}ª,“åi f"À4-‘iŠÀ3î3¸'žÙžG:|ò ˜4à zdwà5p NžhoPä"›œE_^’ýÐ=²Z¼*÷^À±ÿǸQèü‚uÃ_ºC¬ÐrŒf=0f×A\žíÖñ&ìºãÃÚlد[™>[âÐØA\'áî¢×?£E³¬N\’Ðû ÐÙ3p“^#vÆ3¡È;fçíŽ03û‘QÖÓ<¢¬_ñ(­qòÚ4¼…ù¹¨Ö—A7Pàyè™’ÛeYÎnW’Ûá”Ü)6z·ãsïaeÜ9ÝL¶1'Ë,;M–©Šª:=$å(Á´œ×áOn†Y_ÄÀýÐÍ“úWÒnu”v3uæœV 'nX‰áiäùŽö¸á½NU( N8¦8Åqvø,é+›𠜠[2H˜¶¢¡žCÙï‚›QêŽ+Ê– qTp¢£tóÇlGK†!„5ìPàEQû\†_½ãÙ\_)’Ð~›ÁÛh¿Â^”Ÿù¢ž5–Š-†“CKhGÁ¨Eê a4²qìµs\øxÎÖÀq„ËäïW’dæø€Ân·¦µ<±¤¾øU‡´t±vXÏ(pÇ(4‰÷_Q]fzt*¯‚,*á’^ðò¥-Ó:Rquäë©øúý¤§yük·~;`-™ö±tÒç`0•QU:œD:Žh^n~ºeà—ÑHcóŠðl§a¦y®Y¾`äã¶fMèÞê…îå¿r¢Ó‘d€À[ª‚s%ë¹cƒ#§ùb[:CDÃìYN´Ó“}°äð-]@z mʹ‰iùØÚßâ,»kºqȺÕi¥ÖP¬=d ÇÚ=Îú< ‚‰R\!¡ƒ€äW€ ¿Ò¸žû†#g’d¬/w̦5 ^ÞS—›ËnŽ=€T6Úaç*³‰þæ­îGžSøÓn·Y–@üóγ… V!VdÕˆ–xaxxXÏc£%½ÀrìŸ.)ºu¢¹8d‡êõÖŒ;-EàbÃ#þeÿ-;Q >ûv +Ž3#F5å“}oGqK‡„Ñ Îb;³Äæ1ý8×Ñœ´ o[òl‹8à½À€K%ÒW?|ÿùíǻջ­ÎÖžG]D¥ÊዯKTú󂕃±§›UÍ %"—ŸSÄbãWQ†dõc®ÇÉÍÍ$„pÛ¥‘h2_§ëo¹[+“¨Ê˳bãéØ ½ýûë‰Íõ*ÌC'”Ü{/ç:)_ðœ73…+72¹PÈ_·v ÖŽK±¶:Xû‡t7‹º€$7¯™½Œ£"+ÎÌž9·²ENfÇÌ>’g< ŽìMˆ'?µ7b¼¹féàã j·Â¿›o= ®6ГEeZŸÚ~d#Æ´â›Þ¾R±Ä´} ýñ$5˜{K&ß—Ã GBʸÿÁÞÅ™d¨Ï¹íÕë¶Oâ<ŠUv0¾ãã6ŠzG·õq}¦,õ  
×ÇVBÇnõ¤¯ÿ"`E„‚ÄJÌÛƒVï>}BÔÇO¯ª'–Õ¥WçÞn-\â÷·²ãöøLÅìDÅœT,YÅ\î<$è÷ÑÁ]j“—^„ón9ùÿtËwCµAÁò[N’Ü­±“Í ·UEú¶×§"Û—¼¢C)¨Mö(ϧ"(°² }‹pº Ã:ô´CÁ’­¸7$Gƒ¯"kßpé|˜'^eׯÞt'V^bºÙB;ê2^.»*4_í8qç3ø>2ä6Þ;(ND@hlý«€fÔç™DÉ•Y:ÓÔ î´ü3ªV³ßX_jLw³3>HšaܸMÉ …ØhXx¯š¹⠞˅›3$Ó˜y·ºúã*0’ý#Y¢ ^“4Û«_ƒ>‚@à|UðL¤[ -£¼Jî‚û«ŸåÉ­ ´Å%GLžG•*\çâô´3.‚ã óÜr ­$c¢[<κ„¢¼HknñK2|M ÖÜBÕàå¾ÕÜãð¢§E3aéc¾ãÅa¹Æç:ú@]Œã°õ˜}ßWû¾Ïï ü^˜ËFãä‹\1üKò±èxxoçW¥äú:Ê qÅÃÛx î&Š ˜q—˜B-V¨ÞpàïI¨ªâÌÊK@Ö°ƒmÑ)U Ÿ~èâÅdÏ$p×ñ¬Ñ3öæLºÕ'¯ˆ¦ûgžW{™ø1æ â{«x2š ?¼žàÝ7ßÝÞýs‘$P/ÿÁ»æQ®ÒùáDaí%343=©Ü;„<0/{B¹Œë,µ²SŒ·ißÓCLÝàJ|ø; ’ÿTŠ:ÊŠ"€DÕµqu^äÔÀ endstream endobj 37 0 obj << /Type /Page /Contents 38 0 R /Resources 36 0 R /MediaBox [0 0 612 792] /Parent 35 0 R >> endobj 36 0 obj << /Font << /F32 14 0 R /F15 9 0 R /F33 15 0 R /F35 29 0 R /F37 30 0 R /F36 31 0 R >> /ProcSet [ /PDF /Text ] >> endobj 41 0 obj << /Length 1848 /Filter /FlateDecode >> stream xڥ˒›Fðî¯Ð-Èe±‚$’ÓîÚ›l*±]^%9Ä)‚‘4µˆQ1°|}úBe6v*====ýîÍ'»É|ò㫹|¯V¯.nT8 ?ãp²ÚN¥üE²œ$ñÂUÖæaš(/k:RYSá§Ñõô¯ÕÏ7A™K_-,ËÍt©<[Ogaz—oÈíá˜5fcJÓLÃ¥÷ÌèZgÎVî ¬"¥@4(°3ÓÀ«@)oÇ8T¨1¶âU¥uál¬\‘•¥`À;ݬ‰Mµ[Q©8F¥>Ïã9üÔƒ$GË&,92RJPMmôЉFP †[G¸`I怚C쌃•.x½™† Ôáþ( eù¼®Öú‰±[[ó%¨Ëâúlc¥ÎmÆþ2 Oþöôx”‚Ë­ =ëH0ÿ-[²ÕËÞZ2§[‰(<··mYlÇò™á„퀫¬â¯~jêLPõ®=è 5nÀ‘šÃù*—ÃõÇZ32Ý`ù»X¸Ølè‰r¦‹Ë«Û³Ø'ûÜ5YÓº¯î5„h©Ýñ?K;¼`á/—I’„û’©6î Œ¹†8Üd9&Ì=b"H‰Ty¾ŽQ—ÙµäØ{Û'&¸ñ™½©]!±Õ_œ’7Qr¹l~ϘíéV\BÜÒcÑ ä”[qy˜#(ÝÔ¡ª5ÄM¸ÖÚÙ’Ó¥ÀÜz3æ<¼¼îôºÖ¹­ Œ•$ 4Æ~ÀÒ@A[sØÂ‡ÛKÚÆŽ‚„®Hˆ é½×úˆ¦Š€„}¤=1¶¦ÀÔL¼×øθ>ànë^õ‰_ëgÆå¶È×6̇Ь¹´Ðç °ªRµ×œ¨–Ãú ÉÀJà-)“ òÓ(:7ö-¸MÅX·ÐGQÑí/AÛ+«v믊S©U¸eàè#ÚËpI…ð ŠPE! À±& ½¢Íé°B‘ fÊåX¸sâÖWr ÌÆ4‚%M‰_[•Ï\+ÖŽq'ßáêõ·•õ?K§¤Ý¦Ùƒê)æ]­·mÉ‹Öiì¿‘wÈòÚ’‰çª/ˆ|¬"ºÑL}žÃ¸;Èa\>î {Ž—;ó0`™ ¢À³±x¿@RÌÁ›{õ¤jI}ÄÙŠ¿»<ç-N±¹ô@†¡ä˜ u=#:W㌑iÜ–ž5q–#5¤0ŸçJéÚ‡ºNû­ríQ:rÝž4 F.U²°Á! 
™lAi«LèÄd`tÕb>44ÒaXY^Ó[;Íè2iLf ºG9ŠÞmÅ‘š…AŸ¾éæ L5‰÷¡éïoØØ š $y©!æj9ž¹~L‘ ÁñH +éƒí©¿ƒ yaÍ£Y ,•ú±¾×Fæ¨ãÙ:eýrºÓ2Ê]__¾ÿ®›Ôdò£Ò‚pÙV¯ó ïºBRÊѬá‹æøýÅEžcSª|ûw™mœoëÝ…©¶öâ%Îþž‚éPކ¾î|Q¾yrXþ‡Éá4ªG29¬Þ^­¯ywùi}{³¾¹ýt·šÎ’yìýJí¡+Íß»#ð9&@„®Mæ‰÷N-| ‘u¤6]RÐÃDñANÒŒ씵¶zu”P 7¥>Àžh<ëoè-¯†hTă´q¶CXñt ¸³9˜·LU˜ª€ÌFJMi¦€ zŽQ9:vl]¾ð‚sEv å8”~Œ»ÔŠ.ŒËípÖ`Zl€Ž)ûãÜ”:Öœ>~¹vBøÈMbx#éBg;+øcúüÄ…ƒ$‘Ñ£`N:(:«î=„UpYؾ>ã’«œqrgn¶rpÔw€§¶‹½„ hÊ¥d¤ŸœŒ SnÁEgÆJ2u½Ðs–›óBºý`äc Û'Ä>è:£¾„»-ò­Š~­ʸSœÂÙµu­Lg€€á´-êyÈÇŒéL*/\¸Ç öåº!Oϵ¤XˆTЇq ¯&Sfu·çZÍÛÔQ/½ûîõù RFÜâ® ´ø,êFCÜÐOÜ,a Záná@ýhœ’—0¾¸—g)΢”RÞ¾´·XÝnGÆ]Ï`2 "Ë Z¿¯!è>ª!ý»š‰,E'QÛi² >pÛóFW) ƒpÏ(F„͈³ºj£ä¢›))€á‘wáõò;@l¤I ì •Ú ŸÝ ÈÑTè8a¢ê/š£ä@Öt¯}†`ê£ZƼ)™D¿rø/ÃiÐÍJŒÁ}×Ú;°ûu­½ÿkUW2c ¯Š¼>c²ÎýmåýM“ I}•$Pê—þ"HY±ôŒæÝêÕ?ΨÜN endstream endobj 40 0 obj << /Type /Page /Contents 41 0 R /Resources 39 0 R /MediaBox [0 0 612 792] /Parent 35 0 R >> endobj 39 0 obj << /Font << /F32 14 0 R /F15 9 0 R /F33 15 0 R >> /ProcSet [ /PDF /Text ] >> endobj 44 0 obj << /Length 1812 /Filter /FlateDecode >> stream xÚ­XYÛ6~ϯð[d VtPWÞrì¶ÛI°1Ї¤XÐ +‹®DÅÙþúÎp†²ä8¾XäpHÎ=,>-‚Åoþ¾X?zzÇ‹Â/Ò(]¬·‹0Žý,Íi’ùQœ,ÖÕâƒù¡ðÃå* Eá½ëôa¹Š#O÷ª‚A{ïu3˜Z·Ë¿×<½“ÉyÁbæ~œgtÒ­Úëe”{ŸñG-WQ{ëW/î^¾¾z~{ws}w}sû~íãBä½5;ÕÑðˆüº»_&‰';=´UO ²S4™¥ïëM£žÀDo3º¢Wjz±ú$âLe1Jü<*&ÊF¬ì{#ÍÐG¯œ6½ÔûC£ŒÉ\ü¼Ãı§I*ÅÓ&!ïiZ÷øÞ¡#›Ô•5(¬MßNšº”F1yç¶l‡¶D‹Ë¦6¸÷Oܺ}ÿ u§*§n´C¿H’È雾£º *%ÞÕ£Úªn?Á!Yâ­wŠ¿+Y¡#p|ƒdÂ{U â´ócãüµ„³¾“S÷#¥S½ê€y¨=R>Ið¯êÐXH„iH GäÒú§uK_³ã£ ’ø+1DH ︫K¼oGk¥ämò M†ÞݾÕªd•°F#%¶ƒ0Öâ0÷¶J⤷Þ¼ˆ-4|X(–å;ͤ#—3CÓëîíós*<@ézL+KhQ‡a¿,yG –4€½ÞÌ$OIòQÏ8 ¼á`­ZALUD±±ßC§È-ö"CDÝ0WIÞ¨ø˜m§÷4’e© élÐg Ã r#{›…ð^ |¢ Ñ)#[ê’ÉÉnp@žÚdÁ¯>P܃½’HPa`–³ØJ«ék¹$'Š=eK_£šÆÊn¯°Ä•FÛ"©¤Œ­Î„¥ÁçNo{Ðî¢ Î \:ÊÞV›ï¢ü”™¿Xuù$›ÉlÙq&ã¸ÇˆÙé¡M*$Ajhò½ÄÌBù1ˆHˆ½4DøL¸–ÄË `œf¸fi¾S8«Mm0{1€}mË.ÓWxUEï$@MÇêK¦´uŽ*Y3Žð|dgúg¸mìm‹ ˜pHá§÷j'ž«$¸ÒÍ—pv OÆQ… J^¢JÓÓŠq»œéÌ@Äí 2Ùpõ§D¢•+ÞóXÓN‰…Æe2Œ¹EvW§\›)·7~–e.ª¾mC}8œŠ‹ˆúBÌÕ1߃Q}_Pv>Øæ=U«ž—Ø2²­øPMßcWõmŵë‡ÎËâȸÁÆ l˜3F¥FPq¬]Jždƒ‰m ˜ ºm.vð-¥0vX‰’¢åÀáA67kV‘^#±5Æ`F¡°®â„º èJ‘¶^goDC¼WXC– $?xÍÔ{„=I˜p?„Íèwk©Î—ºŽp!ËÆûâ 'ïÐL ƒ·ï®Þܽ~ûòOšwŽy´õ‚éžI„Ù÷Â]ÀŒ/dìbw‘Ç.7F—UŒÄÙ¯·³x€B “lš`­ó ·»œÛÇx*‡®;õA 
ŒÛÀ1=Ó¨äQ¬ƒ(€ !æbsÝÔ䜀cÙà¡j§iAÊ=µ*À8+Äàc€³Äq‚8³‘®y¯‹˜š¬F"Æ>Õòe`ÓÒ5 î‚”;r÷³f˜ÛÓ‡ÀT†`Ê&žæPâDkSˆ¢5g¹=ÍVV„prÚlïçÐ÷rqû!tO~ºÿ VN+ߪ€è÷4yÞ1j~£ ®¾Àõ,Ó¨Ësƒ($ OÈ0”›ÇŽ<–W¤ Åð+©ÚÜ„îê ô0„&BŠb­ö=‘dâ÷Þ^úÀTë:Á`î Õ™ÚLäà’ü⸔µ²ÕôãJÍÂf)Ê"ÛOX¸pq‚ÔíÔ¦hõ¸âä­ïÙ(ÓH‘ÈfQŸ®«Ÿ‹’ôÿÁUîaƒKo©lÂpŽC±Í¡_Ð5¦“®¯VüÒ‚‰ëžcKÖ¸c™…> ;ÄSÔ7¼Æv²cÕöôî°Ç}u0öžî외gFŠ}jæ<¶"`Û±Žƒ šø= £“:"LÄ7z¶ˆÆ`Fò±n†ÊÜšd–)^GõŒ<8½«òp?|áeàHµÏž1Ñ<à"®fXáÛÇÌ?«a¹!мViNáü _ŽîIT^Žf”þÅÈ8gìt9g•¹± ÓÀÇÍ }m RYœÐOÏ£XD+tá.šÑ#,{mèQ̰ïàÛZ b$bÝ"¢uÂù¥R8ïziH“–hŒLjÈ#Ü·Üsç­YáŸü*ûzŒO*÷gŠêg­Ûqr6ØJN­•gp8LS?ÏR¨…Ÿ»<ƒÓÕúÑܲ÷„ endstream endobj 43 0 obj << /Type /Page /Contents 44 0 R /Resources 42 0 R /MediaBox [0 0 612 792] /Parent 35 0 R >> endobj 42 0 obj << /Font << /F33 15 0 R /F15 9 0 R /F32 14 0 R >> /ProcSet [ /PDF /Text ] >> endobj 47 0 obj << /Length 2075 /Filter /FlateDecode >> stream xÚÉrÛ6ôî¯à‘š‰X‚àÚ›³¸£6uÚXmIGQ…š‹J€qܯï[@YRäi/âÃÃÞ¾@qpÄÁW±ÿ¾^^}w#ePEUžäÁr)£"/ƒ<+¢DfÁr| “HäQ2› ‘VáSn´³?—?~w#²££q0e$Ë‚½éÛ}£Ž˜T&Q•e‰§MЍ,óƒ¯O²pùöõl.‹,|ÛÃW„Úòò¶w üf5ËY•†ªi˜®þo‰~VÝ,)çð.ÂÑêÁ2ØoùK¼øgñ^ ÎÔc£†ÆŸ¸Sí,)µ‚mq¸„·›h&B XÕŒc!q­\?¼B° U·™Ž¢T£—e§ÇDiT‰” ñÕ•2|Ô¡Ô[ P¦ã¯â{fˈº'ýþêøätiA7¸Ê/(ŠîŒæs`‡R†“?å©¡“,*+ùìÎHøxùeèQÐ$ì­%\q×7£3Àãe—•|ÓÍšVñ‰ Ú·cü£q;ãa2Æò·Ã˜A ÆX4p%@„îúñ~Ç0Z¿Ö™í“GNŒÉ6ôÀu{Ñ#ϲkÓ4eצ‰ øIBˆúºkÇQ蘂˜ÃÖ¶jí©Aìx#Q­uÓhàß¡C‡’6f=¨á‰d8ÂfExtIäÛ¾ÓÀ£Ñ-•Å2i&ÈNCßÒ9ó†üžÔë­5ëz^TyxÝñžrn0ëÑù#(°ÕB ^P¼Xu¼½¦›<­Ú€ ÅA³Í%1pØ€É.8Tñ»Y¯zGwÈ‹y“€¼p5†Ê†êÚ•÷U·vÊéiy”¯ŠÂ’ð­®ñšêŒm_aàŠÐzÎ5œ0Ÿc™Öpúé’èàOÐ1>ËBD€©­kz$:ö‰áC•ÂÅ qz.èÄð¸35Þ°cÔ#^Ð͆—æ¾ëÏ®û'ñQ)€¥8ÑÀ×d5Ü­Ï|42’H£*MOu]î(N’ø„1,Á-CŠ?§ÆÄN`6ô¤u>ñ÷é¯hp¨A5d.%Wâí—L) :óå £Áj(øü’äXl§2Œª‚‚(©JŒjåÅâ56Úºr뉜Ë?žß¬sRl1h7 \m!ÍüÑãˆ>¢n f^Š.ã SY„× t¿“~õ^c^ÎÊ ÚÁ0ÕÍÆòêÈ>¸<¤#.X_ÓlD AwH=@Ø#Í0-DÎõî„λä‚èÇ@§±˜¥TÑIhJŸ°\Rú6•†­êÔý„Ûé–!ŒÿÑšîþ›®·ÚZÕØE¿SY§U3‘"ýt¸ÿ=•§Ñ«©ë> MvoÑ×è½åòãâõoËw«ë÷ï?¼a¯~øèüTr'Iæ¥Lói¶’ ÄzÂ\%JÇá/3œ¦ÌIÕa¯‘9Äy.ÏÃ;ô–5…@£Ö¦aN<1áÂÚQÛ—F9•BxvÜøy’[½yÿîúãjq³ºY|¼[‚ÁÁ\‹YìijƒøÆ¹'#ñwAD¥ô{ z…›%úOKÿ±£®Š‹BHH-¬X¸¨è‚”*TgDJSˆùš µ XZm:T2øyð·Â%–Á}£jí)•»” =°“VãÈ—aŠž<ÕÃ,ËBE)žûØ€­k,So–‹ßß­ "~âH¥? 
i5_ÔÖƒ$_Rcx°LÙi.v/!•AˆÌÔ^š áÛa4‡ªoyŠÝë#9)ˆéc7=©³#`ǵ¥‰ªš+ Ù?gø°#{"–\@Û[çQ5ŸôçN4b”Á”jŸg#jàöbáá˜(}ãÀvþÀ"tºá cŠõ4=“l _É:€3[þ*þœû¿§ö,±ë`›geæÛ$’ϤߺI&å ( ÍŒߘŽÅ¥:ƒµ¥ò ¡Öª®$€öº`Ú êù«ê¿G3J6YlŽƒmÂhŠFT–2>ÀrBIN ÆçjgáŒÚ[_ea ¦ü·—F Ò?ÚÄ`ä×7ü7¨ÁwOUÁ»¦VíoÀñÿì.~áÙ©©¢™`*§×“NJ³.§fèY«Ú4ÝÑjÓ£‰_ìgÃÃËãÑ‚’ª"sŠ˜_‘ÀL1fô£”¬ø &xôÆ5Œämïo OÂÞY‹D iAÍè.˜Ó2Ùom¦Gß°Æ9"”aÓ@j¿85ù—k|òrS(ÎåÃ=²’V<þÐIÝZÝ0‰eÕÃÃ6Ïå]¶€½AàÐN |pÈLÝO0 ò¨*d‰MAÊ*JRÈ(ÙÔ ìð÷Ç»åÕßWÀ8‡>’8‹D™u{õéÏ8ØÀ&8=‚D ‰´Ò"JËà&¸»úÕÿR°.øo”4ʬDUFSÔK/b@ ¤K#lž@û”‡/¥ŽðUBŽáõdoq ðkhÐèú âõhÞ~T8Å_9òS\Ä\téªÝ´Ùï¡v˜¨’~ÏÏ7ª|D4ãÄx¯pL„»_ÃÖÐÏ}ØdQ"«`^EiîM_ÓcSÂCæÑЬ‡¯›Ñ5æ ^¢á›] åEÆ4‹Òˆ™N2du ͰĆ”T`É9çq|Фd|tø?·Ðˆ·—¨•Á¸ò·/¨âå#ž‚ŸeľÁÆîð^E Ó£7ŸÌÒÉ–±nøBÇ•÷Óñ Xƒ£Ut˜Ó÷lÊy•`óâ/ó#ÄyLÿ <j endstream endobj 46 0 obj << /Type /Page /Contents 47 0 R /Resources 45 0 R /MediaBox [0 0 612 792] /Parent 35 0 R >> endobj 45 0 obj << /Font << /F33 15 0 R /F15 9 0 R /F32 14 0 R /F31 8 0 R /F35 29 0 R /F37 30 0 R /F36 31 0 R >> /ProcSet [ /PDF /Text ] >> endobj 50 0 obj << /Length 1720 /Filter /FlateDecode >> stream xÚµXKsÛ6¾çW¨7j&fHðÝ[ÒÆ©;mq4½4LBÆ|¨ióë»/R²«¤9$q±xï~ûíBÑêv­Þ½ˆäûfóâÕy’¬ª°ÊU¾ÚlWq’„E^®ò¬U’­6Íê¯ ã0^ŸÅqï×qà†ýú,QÁàMB™†víЯÿÞüúê<ÎŽVŒVgq&eÁk]™nX«2¸Ç³£(w"|Œ’L߆ë³4I‚‹q}¦ª*xX«"ОšöF,ÙÆh8ÒKlÁÍ$jº{œhzž8:{ou˽Þ8Þßy9é‘×8ýAΰ3t7ºMVYη“?¼8 ´3,ØÞް‘ýdû[ÖlÇ߸gêü(ZÛ‰ZÓ!@¨uÏ‚·Ý¾•¦¾µýݳ•†V¦ŒÍ,2/»éøÈâLpÜ|°=` ÙcO0T–•:x_‰÷?Œ`W=Nþ Ž.yÚOÝŒfÞA­øjÞ¢Ërm ²`óó8~‘ç¶5žÅ_ôÀv¿Nb h4ÒwüýÍvvüÿmvWT1øßtpi¡Ëñ DÀzè,ŒwĺSEùFXï'êQyY¥C»N5þîXþÿ©HbüLˆ²* öÚ¶žZíÈѰã&¶ÍÍ—ü“,ÆûfÑùB/- ¼àt)˜Öö`½íMk¸<„³C°¡ü°³óÓ®ïÑwØ‘§ü½±2rì'ÞŒÒ/04®×-ÞuN¼Ý)ì¦ÑøU$Š¿´. 
Høí‰]€Crpæùº„qÜåщèM¦­a" D“WY0lyÒFߨy—l«Ì\&$=úûèʨbà‚Ð Œ9Ïð9ÞÀîŠï‘EKg(gþ™æ“:ÓTÒ8ÀD<ä~A´oô„c¹³š‘Ë ´?Î;U_¡ ëïÐHi\ˆjït P4´[øx &O{<ÏžÃã¤c`yÏ–ìô‘Y‰ñÒvöÈ@Ž&Ý~rÂL€:A$l^ §Á Öh/b7Îe—eóËFH€tA ' ÚV’çcœ¸ ’3 ´ •âp§a•¦Ï0ÙÜ\|tÓ#0Ða€êÈ-ÐÐÓ8@¼ØšŽñÝ=Ö#w3"@ÉŒ®Ô1êˆ*æü]†ûznÕÎ+›e¥ŽN{v긄2¸àõúÏ·W..ÿ@3&)áͳèå>5¨,Úƒ<D•0ûÉ¥OØF`NQšÎèaï8:jã‰"P'NÉ‹xð4Äe Ñþ¨©q–á&"‡Þžp$SH”%à‹¾ÑÀÙ&æÏÚ‡Ò&$Ðíh)q+†ÁµèÁÌ1~×ñ0:]LéåNúî¡0„6Buæ~è¹|ˆg#€t Qh¶ƒÚ[=÷ÜèÙH–Öþ4CÒŠà¢-`]œ…Ñ+Žë:½Çú*MçtJ.7@ØéýìTVèÖÝ<²óì¸c5zÁÑ‚ãÁœ&O(Ô³)¿T¸ &Àp៞ú/üÓ‹ãÈU,pbJr `Ù†R3d1­Jþ¾žn'?~*Šª—Xûå:,/À,ElØDµªTý A¸îE6ÃN*ËR „Ý bcê”·½ý„Ñ„š™iPž³)ÊTñS+ÃæX8ánã4³*úªo<¡K§D¶p8ãwK(S9òu…Æw/“ç…àÍ5ß§,ò$`á0²f6wÌQP°Ñ ÃÉPn$³‚ ø ¸©{Éu{ÔLæ ¼~ÁWD¸˜§\ËVÇ5D1ÇáÑNxœ˜DIA…%tUü·cß’‹¶ýPK  eo ¨à¦2ŠN‰QJ6å’›a¥êY>äwhìô¡<š»’rÞå¨.¤p.¤p>q·Ž“Ã0~UÕ›|Ã7)B ÷ÎÈ=žEæd”–l„ 4´*s441ÄLŒ¨Ã+âã³B¨'×Y~˜eVì[ppÀ¼÷Ø$ w'†õ…¤D”ýbw¥·AØ…|HðÄF=9g¸g>7M'*MÖ£% ÂH0ŒïÄ2o¯®®//?UéÚ^^Mòò(Aö`] ä…ÔCXŒèÁj|ÉÓ ß¼w¯ùq$ùú¨¸>xà£gµdÔçï:ͽiº <ÃøùÆÿEœ°ò)ÃP>]Â!™)´~Ú/T!,»$[JHÆ$aÁW‡Í–ÿcVqž‡e‘¯Î2æ¹À3VO½Ý¼ø|‘0 endstream endobj 49 0 obj << /Type /Page /Contents 50 0 R /Resources 48 0 R /MediaBox [0 0 612 792] /Parent 35 0 R >> endobj 48 0 obj << /Font << /F33 15 0 R /F15 9 0 R /F32 14 0 R >> /ProcSet [ /PDF /Text ] >> endobj 53 0 obj << /Length 2384 /Filter /FlateDecode >> stream xÚ•kã¶ñûþ };Xë$QÏEq—»ô¶(‚&ë @“àÀµhK8Yr$j½{¿¾ó¢ìÝ8HûE‡Ãá¼É(ØQð÷›Hþï77o¿Ó:¨Â*Oò`³ b­Ã"/ƒ<+ÂDgÁ¦~V:Ôa²ZÇq©{·Š•qó´úuó·ßÅÙÅâ(XÇe¨Ë‚—};Žu6dRqVY–mR„e™û-RÜ ÉÔ'35«µ.2uß~µ¥ênbÌà6:´½­yîÝJÇÊñüæÃ{¾­qíÐ º=Ø?—'e¥¶pBXšÊ™¶ŸY*Ãs€.Õ|ÀïbʃÃŽéŸÙ$8\8´=ã]ch¬j<`ü H¥~¾T$Âï÷N3˜ŽŒFÛ_"¢NpÂ8<.4ÎÜž•eÊRnŒÁœ=ê2ÊUmwfîÜÄ#70e¬ãpµN5(²ieŽÿ™šf>&c ÿPÚB Îu¶·4ÿ…'xï\ufÜ[fPgÌd§‹Wüe‘Ò8QÖlQ XÍ1Ö vq¸·Nü6·Î"¨U7ô{àßâ1Ð0(ˆhþ‰6GüYÌ0ñ’8º¢ˆ'wÃÈHGç÷ûƒRò2Qwý5eïáè£éàpüý3©TËü <°ñ†ij:Ët¤qTW? 
ÖNLz"qi¾±Œzƒþ‚#Ç}C¾ú†iL?Vâ,´Ñ$xááγììðWNNâÃW¿ $ ËJ/ÑÆ’ þ…¢ Gd‡£(-µºº™|ïOÃðÝŽb\§ $Ñ6¶ŸÚG: añz¯°Î©Ç†‚9;¹¶ß3+ò<ø?®²L™±Ðàˆ˜¶¦3l±[áj Ì‘ÐPÄ;Iwó°*t‰‚ë$uœ:ÌcÑWzå|i˜BB]_PaØe½U¾Ö: ‹2}éúñ[s ãÀÉ:kFÙ"ÇÊ62Œ3ÖB3ð Ç¢5Bhxvr£iÑoJ%]ëUˆŠå’EÀO”à©i}(â°f˜š®¹¥N"5µ_Å.08AÂ-ƒ#…7$;ɨ1#¥+Ì=i¤~òò½ÝÜg;À<¯À¹PŸ œ­Ïa‹Ž•x±)¢ä ²5§øqX_½ž·$JœªÉz¡TB†á¬Æ› zj×nIÞéÌ2© U–]¨e,a>Áó ï²\0×[[˶dA O`ä‹í)&ê…úÊ)l/©6És),HÊ3ÖÈ-²Œ¸ rpÍR“&^qh%É0Å"qºš’¯à¤üeBÓ%1¤™ÑµÛÄéž™ ƶ&Å^ܲ´F*;’Zy\=ͤjîkJ€tõÃç¥Ü¾P%¯ NÃ*M_î°!ïG³¦àBË$sût=ÐqÝ$zæ|ŠªF¤Ô€NèF]ǃE)Èg†Ì~P¤ž5üÇQ¸Ñ¤éÚƒïÃ…2ÿ§ ÙÿóbSò²»}NH][¥îý} ö¤öny»à²+ÏfvÃj.Ü£°zæùã8°ÌÒ¢"eÛ[èòm+&YÞ*ø1ã@&£‡f^-‡IžFþX“òr«NÞø7ü+ã*YÛÎÎû&Þº8¯ Ò=ö{.>ˆ~leý÷›ï?¿Ã$¾Ùüx÷þ§ÍÇÏŸÞݺ¿ûÏG~T¸Ö¿¢=Ó¬oK¡Ñx˜/ºvª„€Üv\÷²s¬#Ú{#⩜ŽúH_ö‘?{<˜²„ÛÇRáu÷z›:!$ïŽÎ]>4 |%ÇW#NM‘OËø^U?0féÀj­gyæá·$)D)cJÿÑ’÷#¼K±üÀˆÄ([òNÄ·'T;EÁò’ùqsóÛM `ÄË#f\åt÷Þn~þ5 j˜„c‡º*ƒ‘€´¡ ¸ îo~Ñ"Èà–_ð‹hš†eVqY…%´—÷|F1ßó!´ÖIXÆ„ÍJC‹åŽß¼};bS 1×ÕJ…Ã×ÎâÍ­D“Áôc W€šQÒö¡ßòÅýìÖ|»Bl?çÄvò;‚¤ÝËJ/3Ï=âvvôÛwƒ—ö\ð4b­áèZÈ./=Æÿ_eÄ8ÏÃ,¿.ʰÐò&ë×Îö_is endstream endobj 52 0 obj << /Type /Page /Contents 53 0 R /Resources 51 0 R /MediaBox [0 0 612 792] /Parent 54 0 R >> endobj 51 0 obj << /Font << /F33 15 0 R /F15 9 0 R /F32 14 0 R /F35 29 0 R /F37 30 0 R /F36 31 0 R >> /ProcSet [ /PDF /Text ] >> endobj 57 0 obj << /Length 1834 /Filter /FlateDecode >> stream xÚµXKÛ6¾çWøRT.ÖŠÞ’Ó¢@Ó4iŠ9Ä@IQÐm‘%ƒ”ãÝþúÎp†zxåE ›ñ1óÍ7*XÁâͳ€ÿ_nž=G‹0ô×i-6ûEÇ~ž‹,Íý(N›rñÁ‹ýt¹ Ã(õ6¯^.Wqžz¯—ëÄÓRÖÊt$ykèÿWu¨êÿÜ6Ë8ò: ˽fz¥,—m~{þ:LkE,VaáÇENÇÙC¢"÷ÎF ú3¸‚}šC-QTx5<©æÓ2Ê=ØÛÎ"p´o5 ºJ²tfùËU­½ŸêºE¡·•í{–ìÎÚé`ܦ8Þ]ÌHÖ¨§jh—µÁÌÈ’Jž5£vˆM{—Jípe…‰W |øŒ?’&ÈÏhH[“¬$a˳ÀB0VÓÚNå <³÷æ"Œü0_ƒ€lÄ® ý霫4¼7²£­-.8 \pãñµ@ó‹MîíPO<»Õn½2´ìRµè Þ¤=Ù5R[@¯t¦*F³*þq®;u²DBdsè*oQ–‡~”ÞÑдŒqgâ‰Ú>Y˜LfΘ땨u¢ã5[;™æFé7OëÏêþþ¼¦cüŽx¨¤ƒ²u&ÍŸÈ”JfOüå^Uc™:²§pWËÏêH°ï#Ÿø•F_?¼ÀP<µ§wŠç(Cÿ?ÒŸóŽ·gžóÃcswì–ÈçbŒMó¨à@awÂHö£–GiñÙÚtð(†T0ßzhyšá÷¸ÓD«þÄ‘çÒ/`Ý•û r_2ë¾6 ¸0H!ž¦N6«À[À?ÎB¯=ã¾Y41äƒ8éè%<üï…¦w6APKcHÒU¢q2 ~ÇëGFÁãÔ(tFV€3º³æ-¦ËÒå`yäH¦ç)FU×.¬,Øy°©ËnŠXÈì4¡ÁLw¡±KMH]«Ý²½ãŷ̈ZèƒK£M{>T_Bƒ|Ö ïÀóú¢zNX»–XÝ^Ñ£‘÷ÝF¹$‚Fh­ÐË"!BX=åéW²–V‘…±½‰ÇiŠÊ‘Ím”¦-BCEúê’ð;×»‘ózû*a¸bQõªy²ð2[ 
W´B«qç*øÍª=Uþ\€N}­àD­¤wKS^Í%PÙ±Ô7ÈS|?í¤¡ÓÈZÑ2O%×ß6쾦>¼eîÅýߥŽÑŒ`Ó6«¤n¯]™ùëØA´…mÿ1H?áÀÊ93¦Š>8S†¡Cmמ]ý˜$ÝAk‚Ó\ÇZà§iX¤þ:tÝÞÒÕ"’ç.šóÌ;¶Z’Œ“Œ^è§gl}lcqabÙâ8Á½¾°„=ŽiÅUœvÛúƯÙIDìQ"Ëýt»Fww Ý÷êØ×´£ÐŽŠ•ÒÈ8½ síh69áš0£S²§ôLþïæS:`Õ€+ûèÜOóÒŒžìH³§zƒ¾ÇŠóÈÑ+¶¹·w>¥x½½ö6.ii…‘¼‡-aøßÑ «*<#ßÃp=L1­å|•Í•¹Ü¯ÇÞ­ëZ¥$cãš.g„»Ž B¯£ÑvpD¿z¯Û£›'HòhäIhŠîŽÖøŽšr~(•ÛZbP'k{Ë_¤XyÐoAi 1îhƒçßbÞ ]!îÄ9ï0V¸?ÕJ:5Wö"¹ÚÛR…R¹ÂJ>3Dö4g’4d”`pêÏØIYZÒ$iôã”HƒK,ip€ŒÀHûlè-‡>+B :нÕR˜¶Aˆá½| ýRªÄཊà˨7I½8Zm©'‡\+TM‘—N¯¸ ZÂ3ÔþNòzÕšI-©O RIÌlkÀ»*ž‡–Ý‘HÞŸ8 á>s7nWúw£K3§ j©m»vá$=““ûÌ~#Í}u § w(d$_õÍ´syD蘿WÜn mY.EQäiu¨l>X¥>ôb[õ š¥¶T€/Lù i8Pº×âpä”b3&$¤(]ó%vjÑ<Ž¿ràóII9Ϊ1g蟲®§ëÌZÿ¢,µ½ÃX!! {ã!‘¤mð0ØU¡»¿žîFQ€£> xÔ“âWœK%›Ñ<>‚)7cDï#ú(ÞCuFAÀJUÊIGßiÐRìè+ñôxTÝ]ÿé"÷ú6õ$f›qÎ\JósWn‰aÝ@2 ‡ ­*<üBUóx”‹Çm ˆ -¢ -\ÐÞøK k×Ck¬TIà•gÞ(\8…a ×öWýç«Î7giÜLeœ” >6…ánZwÃ>/ %|ç†ÙL[1¾x¶]ùF’™þugùEž-Vqè'ý-)™Lúeóì_› ]G endstream endobj 56 0 obj << /Type /Page /Contents 57 0 R /Resources 55 0 R /MediaBox [0 0 612 792] /Parent 54 0 R >> endobj 55 0 obj << /Font << /F32 14 0 R /F15 9 0 R >> /ProcSet [ /PDF /Text ] >> endobj 60 0 obj << /Length 2277 /Filter /FlateDecode >> stream xÚÍËŽã¸ñ>_áÛÊ@Û#‰’,Íž6@&˜ @I9ìæÀ–h[= RšîÞ¯ßzQ’ÕêI޹ˆ¥b±X¬7î.»p÷—¡Œzüðñ³R»âXdq¶{<ï"¥Ž§,ßeéé«t÷Xí~ Ô1=FûC¥aðu¶¿í*zg*rü³oÆ¡î»ýÿúñs”.8†»C”U~b^WK¢$ø-T‰uþ¤ÌѵcÌÐó¿5m¿óà;~ Ïé¦áÉÁ3+GkM·OÁÀˆ«m톺tˆÈíù^Í+“ôOÄ·îG×¼òdÝ!j0V—Þ…¤ŽEš±ô°ºR¥ó¢Ûº3üè!`Ú•3ŽN\KdýQe/±È5è´ï˜HtUYã@ÁG’#JŽE’ˆ0éB•I‘gkÊ‚¦F"®Å FþË‚'á‘»5µP¡–q´`]˜Ž‚jD¨ÜGžx)* `ããþP¨(øÅ¹4pa7:£±gS ¡PôÖ쭱XŒÆ‰ÓàÙ7SjÔD Ñöä%€¹jñ„« â€Ä¬Ä‰Àö•'ν¼.‘ÁuM^ 'K’ôJF€ÙºUdž+H´ô‰tò 4Ù) V#)þIÔÁý™G Ó1XËhM£Š'•±÷râíêßÍ=³iêªÝUPú©1 ?‘!f§ý^ÃjOmÍæ‘z^÷ŒšÒÍ7áÂC£íÅ0HîûöÄ™™-O .Ŷ¯uÖ­ßH˜²0:Áœµ(òlØS¬Ò‘C¼òOôQÁñRKiiã€ï˜ âXœôzU K ^rp)éá eŸ0pÆ´x²°€Àio… NpV Å-=ȬáÓ"ê*€PÝ‚X74(ªÑ´’74¥\Zÿ|­IFY0Àc wÒš²· cɲÎ>0nb‰ó÷ÿ¾À I?‘ƒëy„)™Fg´eYY-€âàD€ý …‹|„’ÌDþ%¥j_sÖö„Uéb?¶ëÓä3éVXŽÒPwÿp6뫟¨4ê÷¾3@ñº¥º‘Þ²@ÕX²G'dXÑüµº[ìZ6`YOÈÇB×íÖQØ ’8äðF0â¼£‹tã¼Í›æ³XÐa–6@‚²×q%çìxv„EI”ñß+ h¬ è|P‘tBNa ¹’ëŒPnC[;Çi"¡©‚ªA»‡¦î¾‘ £}ÊPs’L¦€ìÍš3hPÅûqøˆŒjíïû:‹åžøhV• ‚„€ÆÑ±šô¥íÀ΂()¸‰Œ¢œê˜±ŸÐ=ÄwøIȺ~x? 
ËÆP®K±ÜÔ¬lüáÈ›Š­AOc¸äæóÁ\£qJÑa˜•§íÛÖO–Ú ö™µ…Hî¬ÒÙ£©pη"Î;eã.0»ËGR¤}Ò¤’̆ñC·j=T‹¥îjjwñg¤[Ê’ee Å?lD¡¤nìã˜íƒB¥Pcʼn )0kÁ`uçàÞ@ч3¥Ôaýµ;´¨ã5’‚È܈gÃ!D¢Ei èLf ãëô|‚6b3á¤ÅQ½;qîoÅŽV`0¬dÀŠýMEÒøE¾¹Œg×$úØÒJˆ«Ï2Êÿ2QÁrŽË%g ’€‡ººS°î“ú~£ÁÁÐiÁ;³4YæˆPMÍ#ÂÔì  y˜«ËAîF!GŽ’Æ¢q?cºÈ——IÊ/¡šÓ®î<Öˆ¶¤ŸÎy*`uSE$Ý Ho9¥D+Tp0¶=ah’ZaæÀÀÔ(Ì+}†3KJX¬e'p˪oæ´C!šL!³|ÙLçq©nÆù¢KÈ¡_JHý‚]Fb"É GÈ   …# )œaŒ7¯š§¸þ—3㉛MŸ7/7iߦ«w²ð^…UN®î\äî²¢‡¦­¬6/W 7èÍ+x¶|ÍÀ+‚vÐ ûKoëáÚ®.ôÎg\¬’¨XG¹6 >ïs,éF¢PÉ’„£+è'`zµÙâã)KA’â˜&9Kïi~=¤a|©¼2ϯ+Ý@»¹d/wÄdU"VÜBÚì¤ä'ÞÜìoo:¦GÚÍI_=%œÿe/µ¹×?ÌÝßøíâ¢àíàXG“–ü¯êªûIæ¦÷„ÜÏjY¾< †n˜Q‹âxºô•f‹‹¥»‹©0c}b·…ìßSQqÉ¡ÀŒq~ïPµìÎÀMÂiÿ.qoþiáBMɦš¾6úÍ{ZE²œߨ»õÓ•·íŠ{Ÿûeuƒó³óÆp]Ó<4õ0PQ+”d6ÄB#zkêæÍ—a3WòFirÌŠ”Òâ˜ç¾†¦wD~üð€U‰Ó endstream endobj 59 0 obj << /Type /Page /Contents 60 0 R /Resources 58 0 R /MediaBox [0 0 612 792] /Parent 54 0 R >> endobj 58 0 obj << /Font << /F33 15 0 R /F15 9 0 R >> /ProcSet [ /PDF /Text ] >> endobj 63 0 obj << /Length 2363 /Filter /FlateDecode >> stream xÚ•XYã¸~ï_¡—Íʃ±¬û˜y2Èv° L²É4‡ÙEƒ–h[hYtDiz¼ùó©K‡»íÝÉ‹U$‹d±ê«Ë¾³w|çowþ‹ï‡‡»Í}8…W¤aê<ìœ N¼"Μ4ɼ0Jœ‡Êùì¦ÞêׇŸ¦]Îçuâûî»Õ:Š"·5üýÍ´©Øµª¯í/~kû–×ôדj+¦ûƒf9Mg;ë ô‚¬€oáaÄ÷>j ¬aîšS_ëß´ ;uª+&ëÖê®ßTºÑ«ÀíWëÀÅãÃÌ5;æØuZ3ÕÔ¶gJ·+øí»z¶çEUU¶–¦«tG7¼Y­ÂÖþnñãÊ-;UwÍ™é=2€¢è·’uÓ1ñ„¯Óú„'_{שýQT¦3-<2÷ÝÆ¬à®güâ'>Èy)P<-É÷ï¦×¼Ü*-\f¨Ð òH\Ÿ…£Š…Ô²ÈêFBëj<ƒUu£» ‰SQ´`N,¤ßŽÐ2ÃþÀt½c0>ÓÛªpƒ Z#dÈF†Ò ¬nü²Xœv”¸rÐêÔœßãØ%Íd å Š/5üƒâ=°?+FDf3ϼpP–çÙG.X{µmd’6 Çá–TÙ1OÝ2ƒl'!Aó d%nÃö uÕ2C˜$dKpç^ñŠøª±“Â&àw¬_Å •¶ÍyÝÔ-ƒM´(.|FŠÊže8-Âô¶¹B‡/ 1ÅÃÌË‚Eà¨𧇿~@Ù÷ƒ.Í‘PƒûU»‚ã@‡^D7DQîEyÎg~&«¤Ç‹TbÈ¡ãÑièRJ•ðXM¹âƒºý&mß©^ïÏòºÄI½"‹r ³EîQæÀ«ÑqrEÞÜóÓÔY/¸8ŠÂá »z4¾"/„ɃÙnö³…pà$k¶mègžïG—Æ•÷„оºkUÛ _¢¨£E_˜DA1A=­’„T¬F\†„ºT F ÞEêæÅ àðµ®æ¸ÙÏêž %ß0ÕïÞ^‹d¤ž0—ý:>GWN£$aA³¥( 3åÐi9aÌHs\¢Òem§ý”áÛéú7ÆÙKv¹²îÆíÛkÁìù [vÅ®¸i­*{Ä éæêǺŸÃ˜•ˆ'~©ÛFu{}‘aãe†F—ž&^6;— {%¾û3Ü0ϹÆrDŽÜO¦Hq·*“6eÛÌô±KiDPIf[f{í:aÌ1ÙtO–‡µðž:ÔJ)pDåôJ3§R V;+ý•“|¨n«›ÙÄÏæ*Å'ógTÍßGC—ʘ&4â—?”ñÁ…¢t,[`ò…3ùW‘ˆÓ¥jÇk×’®è:kÎ@Õ5´OEKšŒéJœÔ úü"]`¦‚„A Òi `;Œ5 8_0i¾-'yX³n@l’^‘““ÿ,ŘrS4™þR›Á6çk/å“ü=Õˆ  'ÁäÑtp þ¦_¤,În'`@%9!$?5äþ8Á{ôôäVšÂŠßU]ÆâÃ-¾|‹T³ 
\9ÊŠ{ȉRxË0DÉ\Œ?†Q¬Æ×ÍÜKXÊ|Ž‹†i†7¾,xªõ\¼”·b%fËLÓ#³E²¢s퀷Í5 Üd™O й¢¥C¹¢]_31Õ~°)E£ÆfÚ&H9d ƒ#/VF•Ìíx¢™,[c²LQÇ¥²NN„k¤a’tFRÓpHù\7 SGÕÏ•)g¬œÌˆ²2Œø @ìu‹×Õ’èÒ cëøÍ’ Y:NDcSs!ßœ¹á{¯t„u#¸+g´ÆtLû}Ïä[I£0î§nGÔÉ ²a‘™Nv“ºpvЦ8zMßã|r!‚¨4¦˜pƒdIFK:R8 ¥U"J¦ Hì&‡  jΡ8ä!užãI2'½-#÷,‹F¹†Ï*Ke©3O*Üœù`u¡¼é*&'e“‡Ž½+[ÊÛ¡ÄŒÀI5&×›´¹½ WÓ¾}Éø€1%/ ~¬¾­“ˤ“ûY¸[Ð+“ÁO솘ƒòõ‡3Õ0ÿ/+(J¥ˆüÃînÑsƒ¤Ü5Ò¤†£ß. ïh.°¯•¥A‚ƒž9‚½ú ž/¤[~†½–Wâ#\‡Tœ`ZmAÊæ(Îý—Ì^½hÎÛìv²¹Àïq»›7<9‚:¥qLw!dç}HÏGKÀ 6Ro6WÿßiÝ>Îg=R!¤Û÷<3 Ô›ÝÉ-™N×íSËRÂ¥8ßtç“>߸ó÷/zÂäñÍ·TªWxËãÿ{ o¼}ÏP·}Ž×솦¡×/U8™˜ŠB¡–ý¸í†Åo½7½&ÇQíëRp$Ù ,úgŠ Ê_H"¥\©»ÎP+`ŒoÞ\¸Èwÿ¹ €ôÀ "ðü4‡oîIì”ǻϿúN‹ ž¹óL¬G`ɼ8nœOwÿäÿ©£ÌI¼"ËäêØË“ 8/‰.þA‰R'>?@¾®_ôò Z´§ïGîG ÅY÷«¼˜þ>ò§ÞŒÖ~¦ éÅzA%ü ‹ElË ¼¯þŒ%d»?™Ck{jiÏŸäûﺱÓAAQ$’€/Œ g]xqËŸ°ýéÝf¿^i½¡×_•õ ÖnNÃ.ÜìU·U{½9r>Ë Vfd€ã±:÷NöF„ºü?M½Ô¸ /£e¾´ÜÿC'Uv endstream endobj 62 0 obj << /Type /Page /Contents 63 0 R /Resources 61 0 R /MediaBox [0 0 612 792] /Parent 54 0 R >> endobj 61 0 obj << /Font << /F15 9 0 R /F32 14 0 R /F35 29 0 R /F33 15 0 R /F34 16 0 R /F37 30 0 R /F36 31 0 R >> /ProcSet [ /PDF /Text ] >> endobj 66 0 obj << /Length 2182 /Filter /FlateDecode >> stream xÚXKo举ûWèX½˜–E=©ÝÓÉ“ ‹Ml`ÞA«ÙnÂj©GñxûßS/ª#Oö`³X,–XU_‹OAüã*–ñ¯wW7ïÓ,¨¢ªHŠàn$qå±мŒ’4î6Á}xóÝj'y8î,Û®iº×>ñtj]·Z«°ÅiºÙnÌCãìæûÕÇ»™Štšk¥¢*ݸ·eùßߪw¦g™ÞÖ]¿¹‡u?4¶ýøm‹Ïw c?ÕãWz“·ô>Û×{ø{@­_+-.Å7f4÷ðÅÍÉ)ÖK_ùc逓kÇ4y#ŽÝhšµ¹ßìÌû5ÎãѸÆö@©õé’úÅ¥ïn¾±N|ó^å'á_'YTÅ%HVQ™V,ù“qŸW‰móºÒiønµNÓÀ€qFêe•”a75žnºé±±"ÔñhxÈ‹õ# ¿Ž²Þ¡^VÞï¬! Yˆ€8ÙUd<>º‘×ÝJ…ûCc÷¶Em£‘Óµ[–-·'y”+Ͷ¤Q)ð³ÊãðgØÔwЙ„Ý`ñã`Þm×L#bòkï`u”ê’uý‚ÞCj#gžá|^åyhz×Mâ¦0 †îZ÷i²[âýÓÛÍTÛKÇn»žÅ¼ri·ZƒÉµ­8ëѳPù3Bˆ’mÎñ@%‘*+:ºÒ ]Eç2÷ë<ŽÃ»ẵ_Æk¦Á? 
¸s-êm ØÐv£7âÓäÀˆ‘õ ¯ðzëÝDÕcŽrpOs#švf؉3 `éâ°1Á3Î3¶%Y´åJ&&X‘dšãƒ„k‡‘€†“G²ÍòÄ~9ôv àO£l4›#\Ð ‡ã3ê|É‹Ë~,ÄÆ „”‰>O@ÌoB´†R×ÝĘñ‚²Ë o˜{þiê.ôŒ MUìiÜ1íZ©€#ÁeëJÅFëðÖµµ= -,Þ‚_I@7„ŸÕãñŠ£i(Öÿ½z®hÅÔŒîÐÀVËkÝ–G ˆIªdF nà€ÁbݵGK[gÙKPŠhP“rŠ‘w:Ù˜&<>"VÁ­ CÐ[ »¨A9Î4Íç4°ÛÎ0·žŒÀL}­Yæ3YòêÁ™$‚ ®=hd¤Ô’\\.ˆ\ëðÃ5VCc½¸¦!CP˜€J›àJtx%^|è-x’åeFnÄ-ÇÌÖ@N#óædÂÉœç@s9D¢Û#ûѵ²ÂD‚ŽŠÿ:¯ŒB ³Îo(²5þ’jDfùzm!¦¾²T„½´RaÎ3ÖTæ°:Yž¾ìfžºvã0ÞlF/å©YD6î³Ûˆ26‰£y¨"‰ÃÙ×a)±¸&* Ñkܳ¿hSñ`e¨M#BTUai %á”Ãád߇뽗¦=ûg¬bo¾8ŠÎ$R‚L^ Y:.&-eDr̼u¾™Rª³åŒØNMó€5` j aœ¨ç±"G ÇH)€~PïdÕðâÞîëýiê)a„xü'ñÞ ¶‰Âb‡è,x*φ2 òW•\mo¥ÅÆݰgâ­7N}K1Lnù†T|­g0û˜G¾S™ø 9XXÀU1ó|¦ADjÊ÷²§‚Žä/¼.„h ½$Âeö‰ú Âõ#*AŸµ,`À.~-.Iì4&šNP µ§g–wö"ešŠÀ+ë…{¤ëT®º^n#b/”?TĘ{Fê)e"0éîgª—´exÔBwNnPè|Å|¾Tí¤'E®¿èqaEŸµ½Å 0)ŸºQ–GÌô…úñZ,玈º±¦÷>3ãì=ïý£³°& oî9ªŠ@B«­„àIZíö"ÐõæR©»”¢¾i¡&zOBüa³m$…|ƒµi”EêÏ^jùr ïÍ“«©c¬àÀ&â)3ï-$,÷¬RײӰÀĶ韰³€çâ:Ëý°Roê=5ÔôSÌbfaóˆëH×£|˜¯$¶<æüuîÌl¿՛!LÞñ»í­%² 7uábGtÒ„ùÇ.ÑžVþFFU—â’”ÜÛF–¦Æ¿¢G׌Ê?€€æZzü…¦utå¼rßÂ7¿8žç®fw’Ïr ߬·òz›³æ´³?Æ &¶Á©‘ú¿•_`S,Æ߷ØíT·‘0v-uw@q¦"EmL¯œÎPó9Ô–9y•á³è"ˆwL›S9=?˜NÕS&!£wO;jGxÊéK3é;›&ÊD¸ñ1K%IZ0ÏbÂ,·~óuy:’’5´Õ—òLÁsÍ/`‚PAGs¬¨ë;4¦¦´£%H½v €Æ.!꿆]ißòT\q`”6Gí&¥Yåk30èg"îH¹%Ó_›btrœhfhˆfBrí7Œ‹m-ӤοulÛoÞ§yPDU™j|ÕY”YF…Ó‹…·¥°k}"ůe|[N}/ïfèWlýø¦AÄíP³ð™Ø> endobj 64 0 obj << /Font << /F34 16 0 R /F15 9 0 R /F33 15 0 R /F35 29 0 R /F37 30 0 R /F36 31 0 R >> /ProcSet [ /PDF /Text ] >> endobj 69 0 obj << /Length 1001 /Filter /FlateDecode >> stream xÚ­VKÛ6¾ï¯ÐQVŠ$Š”œ=5ES ‡h}K ƒ+q-bõpIj7nÑÿÞ µ¶jâ&="‡óø8N‚C?Þ$þûnwóæ}ʃm¼™vAÊX\ˆ2¼ˆ3ƃ]|w¶›ˆ1Ͱ‰²2¬ÇJy‘ÄO¦‚¶÷8>9E»ÑªšÎ%7i›4T°0$mõ#ê{mqÞn~ßýôpþ¾yÏò ”Yo“"ˆ²4Þ¦´ÎŒ•ÛD<ã¡«ï÷ƒÕ{£ªÁÔtðzò4. 
¢̹ óQ÷Že{ïqïѼ“]‘ì-}RqKNŠ8™wâõ¨NûZ:¹¯5?éZy+NFÉRß Ç}#m3»Nï&­hÍ÷ úäŒÄû¡rÊÙ»5÷h"òÙÁM&}M [ÕÏñVò÷töª=¢µÌ×׋Vx0ÊtN=®ÿR—륆€S_}©Ô«iA´çRûJ”T.þª G£žýÀ…O⥲øl$'ï[µŒ´Vq78Ùb•­4ÈÏa?ß*ç°=ôÊ:X+6¿ºØ>÷?Ólp‹F:\e~æiÝêNÃ|OˆqÑHþ´á"”­®i;|L³Ð¸Þ×@b.h¯½…e™†Ïs×Àq%{:?Ê Å$†Š Ž9AWÍñÎf€A ­Ì‡îQiŠ -ɨ{E-9lòmeñJ·$ë”ì)l†Ñx©îu‡Úc÷ºÿóÐê?½Ã™LËcÎÌhc*”è\ žñ¸Ü2g`ò$üÍ‘t£]© Þ·ŒYéõû¡;¶Ê©9BP6²9D—¥˜C”úg·Ùæ¡‘½••ÓÃTŽ®:íhý«úcÔfz`—Óç)Æžúê˰vOL5£¨&”3·‹ Ó£]ÓÍ™\rHfqZlÁ=ËüMÒx©ó!âI>ƒ" Œ77ÐDŒžÀÈøl—x·¾y²U·xãëìÙ¯g­ÌuŽó¯æóÅWí/ñÈg¨Ã¾WÏ‹œ½ðÌ¿Âߘ·bÕÞ¨îáe‹ëèP^‡Œ.»ÍÃû:5çùÔËóH÷ˆÂ‘Ș‰ä%¼áDb L³+5òÑ68Px Û–˜gÒ@Êš$Ú>zgÖ‹êôùÓôÜÐ ;DZ¤"áÝ\¡E£Zé4e²EZg À>NüToýI >ò<|7:ÒpþObVÃØN¤<FP€jèI’InÓɾR—Úƒsðæ©‚‡hB¸Yƒ )Ë[eO֩γ«+L{ã‰ÃK!ý¯ç#". D,‰·ÛÔ“F¹PúawóÅeÒÅ endstream endobj 68 0 obj << /Type /Page /Contents 69 0 R /Resources 67 0 R /MediaBox [0 0 612 792] /Parent 54 0 R >> endobj 67 0 obj << /Font << /F15 9 0 R /F34 16 0 R /F33 15 0 R /F32 14 0 R >> /ProcSet [ /PDF /Text ] >> endobj 72 0 obj << /Length 1718 /Filter /FlateDecode >> stream xÚ­XIÛ6¾çWø(cE»äôÔiš.‡¶À ÐCR´E‰Ê¢+J™¸¿¾o£,5¹´ëñq{Ë÷:Z=­¢Õo"ùÞ?¾yû!MWÛp[$Åêñ°ŠÓ4,‹jUäe˜¤ùê±^} Ò° ãõ&Žó(øm=¯7iX§k ª4x°ÍÐÛ®ÿxüùí‡8Ÿœ­6q¦UÉgý¢M{Ò4¸ïì:©‚ç‡YG&e`M»†Oïxz&ú£)ãx‡ñ‹ð˜Ïø£»´jìàîxFµ5/µms‘Å­fÂ]Ú½œ*É‘­Öµ®ß¡£‘VqÆåÔ¥P'T#çk>nò( nàüd›œBtzXTK„¼ðD­z…‚FI ºuªujO–¤%{;´¸§×3H#"øãúζO²W5}þrɹö€’¢eRq¸Íů$"Ú¢ÕÏL H/4ÛŒ;Q}Ù™,ªÿ ÂàIWaJfîÆç£mäöYíʼnÑüÂô• m§ýɳÓijí‹IÔo§Ük—‰s³ÅË~ï=w¦Çã¢XÎÂ6µv=û >Ÿì æøc™ˆxäbTäpÇlÒazÜQ«á€ô§(L­9f̧(Í "_s÷àŒ÷ÎhäÔ!L%'Àƒ;bñ™é³ý€.¸3uZœJªObôß.B*_4:FéבQ,îû} YI5Õw†tV¡·/Ö Efõ&ø¿óAFöLŠ"8ØŽ‰[×"÷¤ÁÂL²·À°æp·s®¢Q=ái4z§·Ðt X#x xÙçu^ª15oⵈæ@à´¸GDl#Ñxl!àˆ…€u›XKvƒuºuC'›%¾kzæ•cÆŽà­uË|u>7¨®Ãõ¦ˆËà‘1ƒ[Ý’Ð' õ,Éý…k\l>£žÄÇŒ«Z^AÊ ±ºNì-Œ©*|³¬CË$f¦¸^à¡"6³kªÈV²ÅrÔê´(ã*ø€¸0,(Ð mËßæc"¢J¥œÎ‹Àé³êT¯y‘t¤)¶›š—íUËÜQ: TŽ‘B×à:ÓÖf?§drRdÄô9óÜÙ§N;÷ G å¯1ˆ¨îS5õu•ç Ë, $8(¨F—’ï€kn’…/ìOBî$f½ šäaµM¯]I"]ÉCPRýà¾Ò€T¼í½>hEíoHV¬}â¯(ê«Î/??¾¿ÙÊ4@˜m‰÷²‘*Ly#šNS¡ˆ±n‚œÏLïíéDæ•Ø4á3fAfìùt‘: Ê1®GÜoŠ$…úÈ¥3‘ÒšJKIbâpD£Ü…Ws™NçØg–“ ådEË_5“U"Ò™“i”$tºYa6¿¬«,XˆcÑ‹zR•SaqÈ®°siøÈ|Õí $†ŒLÕ>i9†*[å;&`ø´s't¡Ž§ù]õîÏJ«T§Wã·¶lC~?æþý˜ÏL ÃNÿ=~yäÒ˜ 
åà!1Æ¡¹òÃkõ„ª®®É„9ÜæÝlæ(Ĭ‘ñ¢-@ê0ôgâ6ñ¨ž',ÈÅù`?˜í…DúÚKp¨ ¯‡—“ G9r–)¤íB.uGÈÚ™§›ÍgØìÈEKMž¹f÷I{‘Ý-£áóúùÚç_‹šÓcFŸ>L0¡ø>…»»†~Éô {z4H¿ô2¾|Í›Ä%Äñ &tX,J}÷bC&U é \´€a ÔŸ©îf’°赃~po¹<‘ÄøZ¿•\{’ÿÚoMþT)а*‹äÈ0߯ò¯Êv¶èûÇ7ÿ©xÙI endstream endobj 71 0 obj << /Type /Page /Contents 72 0 R /Resources 70 0 R /MediaBox [0 0 612 792] /Parent 73 0 R >> endobj 70 0 obj << /Font << /F33 15 0 R /F15 9 0 R /F32 14 0 R >> /ProcSet [ /PDF /Text ] >> endobj 76 0 obj << /Length 2064 /Filter /FlateDecode >> stream xÚ¥XK“㶾ϯà-TÕˆC|&§õ:›ŒíÚuvT•ƒírA$4B†/ähä_ŸntS#MÉÞMå"4  ÙýõŠƒÇ þqóøÍææîƒL!¢*Ë’`³ „”Q‘—AžQ"³`Ó?…2ñj-D’…›U•†VõNÕ“z·ZË" ß«¾&¢?0ŠP[5i☞ÆWÀW+Z8Ìm«ÛÕ/›ïî>ˆ,¨¢*OrT"Ö¢ŒdYÐõ›½Á{„ «¤ ‡¹mpš†ÛÕ:)BM“ÙÁYp¯s4ß –6µÍö©"T|Œ™hÜ›‰…§½æ¥¾Ñ/DZ]¶Yn6Óž¨ÿÌn¢MŠîfžk¸þy…Ù#~•ÿ4mNß1ƒe@ÿ ­Öe‡÷°3-ü.<õo#½u4SCÎ<§GŽâçX¦µêqQÝØ¢/‹Ì„«GZEàhµÚgÓ?ò¦V¹½v`ž,ÎCÕóeµc^|Á›)-Ày¨jKôô dİüj¯Äþúç÷ßß’Äaoj\Øvϯ’'ÓÐd­j4/y+ÂŽ,î?þÏŸœ>aÀ?LjšÝŸ`»¤Mßê¶ö5 .ê ^PDeùZYWئ斋È?!/õ ÎÜ/0ν/:iø0o)þÆÉæ¿¬ØÆƒ?á÷=ZmžökÓ»\ƒ&Jã°Cëƒfl šIM†ò9-ïI)8`vË–-'Y¤§fKË0Bb±ý#M]ëCë@3 |í¨$èÚPFeI³»†’ÃìYž2ýè³¶Ì2‚;Œ>L`ÌSÝB•Ãd‚ÊEx’'èØ'GS†Ž/n° kÏçy³ÁßG΃O^!¾Ö™ßYÎ~F ¦£5ÝÕÌL^èð—/·XP)'Î Ä9²Të¢:­0 =“†W»úu.)s;»%‡¼î{ÞÓ4±t‹R¥7² ¹7{ôÒ#ÃÔ“‚Dy-½\D¢|MÌi8R_0iN,¯¾…Åò'íðŽ’’·„èQ’‚uGÂÚ>›šãYM“b»û¹áˆvCÇ›o¿áÐwÚ}]|‹ÿ³ ]Ä4ßéþÉø4 “¥Ž?Í£$ŽÇé•yT²ôm§Àº'å‚Í[\¹´ˆ2Që3)Ã7)vÊãwifd\„n&wh»mëμøîgÑÑUï~ú Ä¡ŸUÅ8?õk‘ÄQžW—@þÁ”_ð¢ŒÃÂöÐú¹W%Œ¥Únp15•n{œö^!Ïkf¸R1ÁM{åhÕÇ!XÖM=·Ê’ÄèçO›Œ>I«éZÐ-EoR¤æRÐzMo&B!L i NÒX²9‡ýÅW½ì(úÐÑÄ Ûå2ÎX€—˜/–îÝ:¾â<ž@ò°d^¬Ö˜ŒûkŸuÜçÓëSP`Ì]†Vž®I Éëx”H£*M/ƒz³4HøíÔE¡êÂ7*§Îë¬ÌBGÑ´+n¸ ¬ý•-äŠÒ,ÜÙ¡[:¸Ž£yåζַ ¸s%ù£©QO ù·ªÖžæ&9Yšd öúôˆÀjaYÄ! 
hŒ!h,1H1OqàbâñïuÈi;tV*}Ëå%!Ÿò1Š­™¦VË™ÃÕ‚XšôRÙÑ[ RÇ™Ÿ(¿‘µÑ„ˆ—¯ÍmÿkïòÞ¿¢ð}öU½K½ËgÝ_&|ƒÂod|í>¯ ŸjÿÞu+ÿlòïpÍοžý¥/`+‰½Â•„ç€ 5å ΰKYH“œüñó•è<þÍËÐÌúŸÒC• m·U\Ó´\0–áûÜäÉ@#4–[>óÌ©B˜ÌþèiÖh´5ìª jKù1è“rÞïh‰®@Š®MªÜw³È9?Ú-¬³˜OdœžŠ;&>’X§ìQ¾øÃ*Ò“²u…/-.ß”³®~ÁПrŸò^nÜß°ë)ÉÌÅ…™e“uöÚ·6ÔyÅüOE†/ØåíîWª@Ý ­sëSPSDjå.->Y饉‹¹9Øœï®u5Ãϱ”ÜŸì9§Ú%‡ ¢®äN6ÆýŽ÷]ùtdkÛ™R¥»}s~Ã-QŠ EXžvêô2}ó¶iõnZ~£ñï(\\ ¡á5<ýÉõ÷ÍÍo7È8§¿·p¬DÔÝÍO¿ÄA‹`‰HVepð¢ˆQZ&@·ÁÃÍ¿øÏ²"È i)|XŠ4ʬd…¤J/Ú™C³R±@ÁzÈQ)87íW¼=½»ÛÎÒ%ýF½žî<ÀÃa{WßaD ¶O`Õ¶Œo’´QQz‚»!òÿ IüÖ&ÿææ0, endstream endobj 75 0 obj << /Type /Page /Contents 76 0 R /Resources 74 0 R /MediaBox [0 0 612 792] /Parent 73 0 R >> endobj 74 0 obj << /Font << /F32 14 0 R /F15 9 0 R /F33 15 0 R /F35 29 0 R /F37 30 0 R /F36 31 0 R >> /ProcSet [ /PDF /Text ] >> endobj 79 0 obj << /Length 1938 /Filter /FlateDecode >> stream xÚ­XKÛ6¾çWøX;Ö[NOyt‹-‚6ØuÛCS\‰^‘H—¤²ëþúÎpF²¼ñ&Ú‹5ÉáÌ7z5»›­f?=[ñ÷õæÙ‹Ë4­—ë")f›í,NÓeYT³"/—IšÏ6ÍìÏ(]ÆÉ2ž/â8[Gï­ÙÏi' ª4º1mï•Ñó¿6?¿¸ŒóÉ~«Ù"®–iUÒNײ3ó¤Š>ã„ÕiYÙ*q«æqÔ*?OÊè€ü4ºë…½”îb:)“%(³¤³Nt‡³’|Y­Ó‰Ö k}ã…ïÝW¬hÑÓí[éå°2‹ãå:Ï“á€rYUG³¤¸}’G—óuÕzžÆ‘oAÓ2ÞøÆQ¼OJß÷Õ,×4ŽF¿~ÆYiwR4ßÖí}+ê°Q–呠϶& µ4láP°`¼OÄê¤Ðîd;8/;¢kѶ@–I$XªÁ=À;<´ƒËÆ£ó,ÊrmvŠ¥†¯¨x‘ z0©î{8ý çÀr$RØY8M`ÞïðŒMxð»5BÊÑt-Xî–t#¹­€‹Xšø°ÊW’`e~ÇOƒb8ßÒ­Ø2(Ö êÔ¼ðgU?*b@Ð ëá8À`°ÛUà'‘ßIèum·º dzN^ ™`P ˜“X °è¸#À .GÆJ¯j¶!ˆ f8£/DS§´ð²ù¾Iÿcd38 ¨@}âl¹Î²S¥þ˜ÃfhžUUa{$É@ñ…¶pQbÜ+¼}º*"·Wúˆëq®ßïÃ"c=6Ž#¥‡my)l°©…3[únÞ¾¾ J膎S„‚Źk4ªÑÏ=e¢N|š$±é> kGóú°JSi%á 8ò¡–{Oò˜î4/¥$×od½ ¦IGkÚ²ö“û¶y!cGµéÛÉ"êóFàHƒ† RÛÞËš «Y-[OMÂ=@ÛÒä;ExÀ s&9ø³÷4©XžÒГ–õ;ˆ¾E îÙªH·Æ1%zo:1€C¸d+Ùj{^µ“š%é³·|_éq…‘ĺõDþÄ<˜ÞòÒ›´|¾rƒô™ë¤ßÑò€¡’áU­'ºâl¯1¬ÔÆ’ÜhÁ䔬‡ÒÏIuäiÓ씒œB‰¢¡©Û!›m ,ëEN”, rz•‚®Bµ ¶¨ µ ¸ÊÊ"àŠÀ‘•¤r8ØRt6}Ͳ‚—ÀÍíBm ™u~á|Ыe±GQVêp@ö¹Rîi‰.•}ÀjV÷D,TÅx_ vÍF%N0Ü@4À C&6Îíd»§¹G¥íDKLLä¼ûv8öëÞBÄ“eÎh9*„ÉVÐ À`í"¡ÔXI ge+nÙÃБ‡Us/A‡Ž„ìÂÝa Ç?’c±$¾v bV6Xïãu0ƹz·“xz‚'ýÝ«0Àš„ßW¯¯ˆ s…¾c“Œ¾#³À åFçi’pP‡cÜ¡¼.½U¡ª¤3.±ò¹£6çÔ×rÈ Þp"6”g¹Ø¤¡Ð“ÄŽ3³C‹néܫާ¯ù¢·)‰á¾·W̸W¼1aoõ»ÆG½N½5Tc€¼<ø½…6{d(чoWäß1xMË©œ¢‡“8Ç®#¦ QQŽQÔÁÈ"¸Í÷›w?¾ºþxuùñòêúfÃ4·DŒáÊ‘sÆQˆlàBDÜa”Z)€"JËu`!j€PVv{¯‚F0 8ÂJ×·ž¹|IC#Á¢Gs‡€† 
XžS›Ûd…í{CTyøU‰Ü[e¬òêÉÒCŽÝâÕCñÇh3ìÏ£/@®ý‰h 9 ®¨ ‡ÉÖ„n¨3ÖEäYy.y‡‡üM¿`ø /®ÔW{ @õOwd„ºã1QÒò’¤AÚ6¤´}”@õPø”êXÉëTÈBD\^¢u²H|X%+g¶€†ÐæÅißÒ5öÁÝâé¢S×-…o” ºmwÀàÖ¸º…^€à†æ¸ß©i¿Öq•öC¶²Ä`@lù†² £è Ë®õCö1lëÁö”±!ËP!£Ý÷µçÙÿóð~¢=gàü¦¬¾ÇWCè—¡¡1è)ƒÓ7tFǪCS[Âôô]sÁ¡øˆ”g›ÅE†ÄÓÞuðá¾üVy\°×¾8xZ 2*,aÚ¨æQ àÔèœ:%oº%‰ÚXÛïÑOÇ˶»E†ÿStŽÈÉÆØø õÁ/5è*F«&f cxʵž.¢Û"ÕHW[Åæhˆ§tº\_ÌÑ%¡y +lø ޾<Ç„êËQ›® =èIƒ Ø?‡ºƒÁê%[ $Là‚\©Ô{~øÄQ¡èº —8=Áðx=¨aLj+ØÁû³2´„ø ´e§µNZGlÊ£á‚Uh50-C¾`éà d‡—äÐ"¯si±Qƒ·âÕöÑìi|àzwšÐ²2Fi=U¯0þ7‹‹bY•Ål‘­—qÂQ K§B?nžý îú1R endstream endobj 78 0 obj << /Type /Page /Contents 79 0 R /Resources 77 0 R /MediaBox [0 0 612 792] /Parent 73 0 R >> endobj 77 0 obj << /Font << /F33 15 0 R /F15 9 0 R /F32 14 0 R >> /ProcSet [ /PDF /Text ] >> endobj 82 0 obj << /Length 1055 /Filter /FlateDecode >> stream xÚ…VMsÛ6½ûWðΘ4 ’"Ù[3‰;é¥F·¦ÓHHĘ"9Ùÿ¾ûÉ’ë$›Àîûððv¡,:DYôÛ]¾¶wyµi»‘›h»ò¢HëMmª:•EmûèoqŠe#æuìã¤Ì1͵˜´îy¤øã^â$SŒ!Lã/qRµRègã¼™¼Ïbç8‘µè´sÚ±í&U-Ó¡á‰ëÂV?è3€{冸Ÿíïpª$ÏÓ¶ ȽÚ[…PSσ½E:Æ?0”bPob†}–ñÛý |J‰ò2mËò6Û¯¸Êã™6ÂÍGÍ£Qymyøq_3Qà=ÎäNÖ•pˆê¹ÃÈ+p Îh9ñÔꎦßðŸ¶/l÷ü%zÈp^Þ+¯xRÏçH¦‚\ó‚'LÞ;"rå0^НY•ñ´ÔBV`nÐû `ÕZ…Y_ØÎ0ËyIFÍGÙu˶aϯYQŽ#"ë!iŽ0S¸wYŠ?B®Î‚ˆÏJø~÷ï¼²ô„ iÍ;â¸â¸hZ§:š 5—BàÑz§žü˜ —¸{”¢ •Ë2†Ua7XfÕäTçÍ<9v­|V3òÔxÔSryûy=s£û³fY¤LÚ·¸Bµ˜žT=ƒæ"rë‘´ûðXȈ‰XóCÖiÓZŠ4¯€â\Vb·¥°ª£’-@#ŸÙq0£¾gË_zU\äDÔ%²Ÿžñºï¤FN~Óoð|MZ45çÞQY!ºÕZ=…z*²Rl?~`”z™ äO¼9êÅ1a&ƒFrÏ‹ýå ´Ž´Ñò¬åèÕ³Ó„ï¼÷¨cî¬VO!'ä~çVpQ™gÂPkCƒƒž°ÿô<Ùá¸×{µŽÐJ’ pô™Á£CñÇ™£•åí@§ØaiuSÔó’ n„­è@ô¼’N×@v[´qH Ÿ_ïâaA³¡¢á#!õÀ ú„,6oŽÍ½–î> endobj 80 0 obj << /Font << /F15 9 0 R /F32 14 0 R /F33 15 0 R >> /ProcSet [ /PDF /Text ] >> endobj 83 0 obj << /Length 164 /Filter /FlateDecode >> stream xÚ32×31V0P0SÐ54W02V05PH1ä*ä24ŠÅÍ¡Rɹ\Nž\úá †f\ú@q.}O_…’¢ÒT.}§gC.}…hCƒX.Ofv> ††Œ0`o`oàgàgC(¬€Ã@ø+É ÔÁtBÌ™2™ùãÆ@Å@;e€v³30s¹zrr»¨2­ endstream endobj 84 0 obj << /Length 164 /Filter /FlateDecode >> stream xÚ32×31V0P0QÐ54W02T05PH1ä*ä24Š(˜™C¥’s¹œ<¹ôà ͸ô=€â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. 
00X0È0ð1ð3°7070`|Àø€áV€¡ ÊÁ¡<òc… „:˜Nˆ9@A&3`n`oàÚ'´·hÿ.WO®@.ýÎ)V endstream endobj 85 0 obj << /Length 98 /Filter /FlateDecode >> stream xÚ35ѳ0S0P0SÐ5´P0±R¦ )†\…\&F@ac¨Tr.—“'—~¸‚‰—¾‡‚1—¾§¯BIQi*—¾S€³‚!—¾‹B´¡‚A,—§‹Â8€Lr¹zrr€Š#y endstream endobj 86 0 obj << /Length 124 /Filter /FlateDecode >> stream xÚ3´Ô3µT0P0SÐ54V04S°PH1ä*ä‰(B%’s¹œ<¹ôÃ,¹ô=€Â\úž¾ %E¥©\úNÎ @¾‹B´¡‚A,—§‹‚ CÃløßPß`ÛÀ†Ì Ì Ì ì l | 2   \®ž\\M¡ endstream endobj 87 0 obj << /Length 90 /Filter /FlateDecode >> stream xÚ3´Ô3µT0P0bCS …C®B. Ïȉ&çr9yré‡+Xpé{€O_…’¢ÒT.}§gC.}…hCƒX.O›ºÿ@PgÃåêÉÈoÎ9 endstream endobj 88 0 obj << /Length 160 /Filter /FlateDecode >> stream xÚ36Õ32W0P0QÐ54W06T05PH1ä*ä22Š(˜™C¥’s¹œ<¹ôÌ̸ô=€â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž.  ˜„ ˜„Âì@ÌÅ|@ÌÅ2@,Ä@lÅ@œÅ0…‰ËX˜‘¸Œ}€ß¥ÈÆbs)²±8\ÊÀåêÉÈ6·'ª endstream endobj 89 0 obj << /Length 98 /Filter /FlateDecode >> stream xÚ3´Ô3µT0P0bCS#K…C®B. ×ĉ'çr9yré‡+Xpé{¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]lêþA 2€ r¹zrrª0B endstream endobj 90 0 obj << /Length 101 /Filter /FlateDecode >> stream xÚ35ѳ0S0P0Q°P05T02UH1ä*ä2‰(šƒ%’s¹œ<¹ôÃL̸ô=€¢\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. ÿAà„d *@6™ËÕ“+ )£ endstream endobj 91 0 obj << /Length 94 /Filter /FlateDecode >> stream xÚ32Ö35T0P0T04V0Òæ )†\…\†@AˆDr.—“'—~¸‚¡—¾‡‚ —¾§¯BIQi*—¾S€³‚!—¾‹B4ИX.O…ÿÿÀ—«'W Sü˜ endstream endobj 92 0 obj << /Length 148 /Filter /FlateDecode >> stream xÚ3¶Ô3´T0P0bcss…C®B.cS ßÄI$çr9yré‡+›ré{E¹ô=}JŠJS¹ôœ ¹ô]¢*c¹<]þ```ÿÿ‡A¾¡†ÁŽȪ?€ã Áü(Àü€Tâÿ ¡`‚t½Ô!Ø?0üc¨ÿÏÿÿ˜àrõä ä†è]õ endstream endobj 93 0 obj << /Length 265 /Filter /FlateDecode >> stream xÚuαJÄ@à da9’Ö"˜yM¢9M!8O0… •…‚Z §(X„»íîµòúû¦Ü"$ÎnïßÂÌü³;/«Êé˜ šQyJO…zUeÅÅœøòǵlTvË•]z›+zûxVÙòúœ •­è® ü^5+‚èÁGÎtæ\DγÐù)½Óþ§Ÿ±~Þú¬M¼•³Í½k¯A=aÛy5b‹šŸ6Î@#õÖ¼»¼ï·CñÝ!A°3؃”=*ÄA5ö…Åb€A‹QtìFh ¨ïlú匜Ò"Ááp;ÌjÈIª£TWq×JÓS/&ãþ°Õê¢Q7ê«U endstream endobj 94 0 obj << /Length 162 /Filter /FlateDecode >> stream xÚ36Ö36T0P0b#Ks…C®B.#ßÄ1’s¹œ<¹ôÃŒL¸ô=€¢\úž¾ %E¥©\úNÎ †\ú. 
Ñ@cb¹<]ÿ3°ÿ ÇøÃ‚áOC]ƒÝû?HÈ€¡ž¡Žáãæ ÌØø@HŽAŒ$, °!¬ lÞ?r‚\®ž\\S- endstream endobj 95 0 obj << /Length 202 /Filter /FlateDecode >> stream xÚ]Ͻ Â@ ð”… ¸*ÍxWm±[Á°ƒ “ƒuTtÕ{´>ŠàèP> stream xÚϽ‚0ð#l·ðÞXÀjÂD‚˜ØÁD'㤎]¡Æ£ðŒ µMÚ_rmî9^,‹¥d/ß_Ó=ÃòÜÔ©-íÇ퉕@v&ž#Û›Wdâ@Ÿ÷÷¬:n)CVÓ%£ôŠ¢&­µ0j ñIÑñ`õ”sr6Îö› KÍi>9é­­„ÈÌgÌ 'ú´Ò+­ o÷‡rî*ƒ4—+·ïàvï¬îžðT™m endstream endobj 97 0 obj << /Length 223 /Filter /FlateDecode >> stream xڥбjAà9ÈÁî œˆ{wl"Vç¼BÐÊ"¤RK‹„-äÅæQ„¼À•©Ô;A¬m¾bvf˜Ÿ“ÁÈsžŸRöCö/¼Jéƒ|ŠI[ї冊ŠÜ‚}FnÊäª)}~¯É³1§äJ~K9y§ªd ß@€ÓÑ¢Æþ lmþ9°ü¹˜ÿÐ%QX‰@|ц¹Hú5`$âÆ£Š{Ü_½ìÌ;kµ»Aï1µmÕ;£Özÿ4KšëšqÍ«mŽ„Ð…áO@¯Íé #"P+ endstream endobj 98 0 obj << /Length 175 /Filter /FlateDecode >> stream xÚ35Ö³0U0P0bSS…C®B.3 ÌI$çr9yré‡+˜˜qé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þÿÿÿƒHþg``þÁ ÿ€ñóÉðLÖI{0)ß"ù!äÉþA2@"€HFLò$†i(6"¹„ýÂ…7Û#ùåÔÿ‚üÞÀ .WO®@. àm˜ endstream endobj 99 0 obj << /Length 176 /Filter /FlateDecode >> stream xÚ31׳4W0P0b3S…C®B.# ßÄI$çr9yré‡+˜qé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þÁÉüƒùã†0i&åÀ$ɆA²ƒIfi€L2 “’ñüÿABDPÔ ™pd`lÀN27ÀÝu„ä“ò`²ä_Æ`¿ÃH.WO®@.ú_‰ endstream endobj 100 0 obj << /Length 158 /Filter /FlateDecode >> stream xÚ31Ó30T0P0bcS…C®B.rAɹ\Nž\úá &\ú@Q.}O_…’¢ÒT.}§gC.}…h 1±\ž. ÿà˜`þÁÀü‡ñC ˆ°r ‚N°¡ì ‚HH ÂDT€ˆ ŒÿÿÃ0*aÁ€© A0A0ÿ { —«'W ÃS¶ endstream endobj 101 0 obj << /Length 219 /Filter /FlateDecode >> stream xÚпNÃ@ ð/ÊÉË=Âù Mšñ¤þ‘È€êŒ E $"õÅÜ7©ÔˆÔ…)á“•å7øì“?WÕå|Î3.ù¢àêŠË?´£²ŽÅX®íåñ…– å÷±@ùõhsÃo¯ïÏ”/oW\P¾æ‡‚g[jÖ ø }"H;·¸69 ð!ø‚$ß±KÒp’ìWçRñf $L¨_Qü±WñýЛú?ªNÌ`¶ª7Eu2m;iû[ÍåÇŒAóâ¬Ù³ƒÞaÐkøØE›†îè­Ý\˜ endstream endobj 102 0 obj << /Length 103 /Filter /FlateDecode >> stream xÚ32Õ3Q0P0acS…C®B.#rAɹ\Nž\úá F\ú@Q.}O_…’¢ÒT.}§gC.}…hCƒX.O…ÿÿ?óÆ€€öBÀåêÉȉ%E endstream endobj 103 0 obj << /Length 137 /Filter /FlateDecode >> stream xÚ36Ó32T0P0VÐ5T06R01UH1ä*ä2² (˜˜Ad’s¹œ<¹ôÃŒ,¸ô=€Â\úž¾ %E¥©\úNÎ †\ú. Ñ@ƒb¹<]êÿc†ÿ u†-€ø Á? 
@\ÀøƒÁ‚ùƒûöÿ Œ¸\=¹¹˜Ž7Ø endstream endobj 104 0 obj << /Length 221 /Filter /FlateDecode >> stream xÚEϽJÄ@ð W¦0/p¸ó&ñŒÕÂy‚)¯²+µü¬“GË£Ä7Ø2BÈ:óOa±?vv‡ù¨«³‹K)d§§>—ª–ç’ß¹j4.,´§WÞ·œßKÕp~£¯œ··òõñýÂùþîJJÎòPJñÈíAb Ô-«›™È…d"ÊHæ)làÍd07½™’y·°ÁÔDUÕ÷l4ôÈùœÌíŸüfìu¡NÉ£šG/‡¾fHW&™0Õü/-°ƒ~µ7¶s£íëƒîžÄ¸Ê×-ùc@kï endstream endobj 105 0 obj << /Length 133 /Filter /FlateDecode >> stream xÚ31Ñ30U0P0bCS…C®B.c ßÄI$çr9yré‡+[pé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þÿÿÁÀ&˜ÿ0000þ`‚ !…°€ â`ì˜0Èø‚ËÕ“+ É”NÊ endstream endobj 106 0 obj << /Length 217 /Filter /FlateDecode >> stream xÚ33Ñ3µT0P0b3#S…C®B.S ßÄI$çr9yré‡+˜Zpé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þÿ````ÿàÿ Íÿÿ3˜þÀÀ¦00îÒÒHô| mŒ…îo``HF¢Û00F¢›qàÆ ÌÌHtÃ6¬t  mÇ •–g0@¡ùPhö°ÒÌPhÆÏ Xi†ht ˆf>¡AWÃÀþáÿ ÆÿÀp…Ò\®ž\\°ö endstream endobj 107 0 obj << /Length 218 /Filter /FlateDecode >> stream xÚMɱJÄ@àYwðæ ¼ÿ ÜD“ÃÊÀy‚)¯²+µ´P´½‹Zú>Š©|Ž«¬SIŠ%ëÍ®¢Å|03åîÎþžfÊ”™¥^år+ÅtÓCåqy#³Zì™S±Ç›Ul}¢÷w×bg§‡š‹ëy®Ù…Ôsõ=ï¼£ÆUk …ñÀæ½ù󭥯kúíèSO£Ž6Cp\«†NZšF×tÜÑ$ÚSã(¢Cp¬žé$úAÓO:Ž~Ñd æŸˆ.ƒ?&-°´¶àý¯#ÈQ- ùhTO endstream endobj 108 0 obj << /Length 207 /Filter /FlateDecode >> stream xÚ±‚@ †KHºðôô¸ÐIÄDŒ“::htEGáˆÚZf“Ë—k{íÿ÷âh:O(¤ˆ&–bKQBg‹7Œ$Éé™VNWÌ 4{®£YsM±¡ÇýyA“m—dÑät°±È mÀù¼™^TAå5 p¸ÐËq:€\S?¨üÊÖÒ¢^“ò·U¾„²‰"äôÿq> stream xÚ31׳4W0P0bS…C®B.rAɹ\Nž\úá &\ú@Q.}O_…’¢ÒT.}§gC.}…hCƒX.O…ÿÿÿÿ`ÿ˜0È`üÁÀþH0~?€ÃñDÔˆz¼D\ñ˜`£À†YIJ¤„6ó ñÿÿ(ÁåêÉÈ´ÐZ¶ endstream endobj 110 0 obj << /Length 194 /Filter /FlateDecode >> stream xÚ}α Â0à“ …ôï4¶R…ZÁ‚N⤎ŠÎöÑú(>BÇ¡5—ÁÂñ ÉñÿªQ4#Ÿ&4TŽ)é¬ð†Ad} ¦îçtÅ8E¹§ B¹6Ï(Ó =îÏ Êx»$…2¡ƒ"ÿˆiBu]çF JxtJèå¬÷fEÁ6£YpV­êï¾—ã2]>wÙ^ca{µundv3û½ç¯E»‚-YÑè sžîryÕgæn0q¸Jq‡Æ-q… endstream endobj 111 0 obj << /Length 212 /Filter /FlateDecode >> stream xÚMËÁJÃ@àÉ!0ó:/ »K¢¥PÔ æ 詇â©õ(¬E!‡bóhy”_`nz(Y'TQ†ï2ÿÿ—Ó ?eÇŸ{./¹ºâ§*WMŽÉú™æ Ù%—Žì­¾É6wüº}{";¿¿fOvÁ+Ï£îœIò~ œÈ)r™!“FA‚ üã¾~¼«½ª£€³Q¤ò”`RêPë#KcªÓ\gFa4ÎwýÑìWüÓê ÕbТ 0ñ³Ëú)÷úB@7 =Ð7b†S. 
endstream endobj 112 0 obj << /Length 141 /Filter /FlateDecode >> stream xÚ35г4Q0P0bKS…C®B.3 ßÄI$çr9yré‡+˜˜qé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]êÿÁ(ÉÀ€ùG døQ"- dˆ”€@¤ „<"yp‘ ’ap“ò@Äÿÿ $—«'W ÀO endstream endobj 113 0 obj << /Length 221 /Filter /FlateDecode >> stream xÚeÎ1jÃ0ÆñÏbx¼fË»@")‰J¶@š@=Ú©C ’ŒRÚ5 =—ÐCø=;’]"•ò†> stream xÚUбJÄ@à \`8îÚ+ÄÌ h’#æ¼*pž` A+ ÔRPÑö²o`ë£XGH™"$ÎFÐÝîc–åÿgVËãlÉ §|”ò*åì„Rz¡|-ijü÷åþ‰6%Å7œ¯)¾1Åå%¿½¾?R¼¹:ã”â-ߦœÜQ¹åaÐÑШ¡ýCØA @a‚è{`öÕ0uÐ8ß ýˆSûW}$ªÏ‡ ¬±s¡-j) 'l\ÌÍTµJ»¨ ¬è¸h¡ hì'ZÁL ÚŠ©‡N kÉ>ѧ‡^ðeQiÌ… axˆÆâP‚}HÛÝv:/éš~£epÔ endstream endobj 115 0 obj << /Length 177 /Filter /FlateDecode >> stream xÚ]‹; ÂPEo˜Æ ˆo6 ù<‚Á‚Vb¥–Šv’'غ¨tnØ2EHÑF‹Ã…s¸:ìöXsÏg²öyãÓžd½·>e½£8%w!܉hrÓ)§-¹ñlÄb^ÊgEiÂh§ÉѶ tP"ÂY~‰Ð…þqädXµ‚S(¨\!º(i™PCÙÕ‘] Œmð¼©ë½j7­Òª¬SšÓ þn1ß endstream endobj 116 0 obj << /Length 182 /Filter /FlateDecode >> stream xÚ3¶Ô3´T0P0RÐ5T06W01SH1ä*ä26 (˜˜Cd’s¹œ<¹ôÃŒM¸ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. ìþà ~ÁNñHÿßÀÀ~žùûÆ ì?jØ?0Ô1°?`°©“ 0‚L @èe5ùÈ¢ ?Ø÷€ìåв±ÝÂÀø‡ËÕ“+ ùÛM· endstream endobj 117 0 obj << /Length 167 /Filter /FlateDecode >> stream xÚÍ1 Â@ÐYR¦É œ¹€nv6 Än!he!Vji¡h'q¶GÉRZ¤pÐÂÖâ5ÿÃÿÎLÜ”s¶<6lgì  ^Ðæìì·9œ±ö¨·l ÔK‰Qûß®÷êz=gƒºáá|¾a à9€êGt)¤‘B(P¨D ´1ü¡ý‰• P²ªB“ʇêKù|P\xÜàÞ0> endstream endobj 118 0 obj << /Length 180 /Filter /FlateDecode >> stream xÚ3¶Ô3´T0P0RÐ5T06W01SH1ä*ä26 (˜˜Cd’s¹œ<¹ôÃŒM¸ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. 
?€XN0‚ò þ@¢þÇ Ar?˜°ÿ``?Àüƒ¿h²<Èx{:;Q'þBÔ êµc€™,ß¶ˆd/߯6ÿ0Ôò~0p¹zrrò¸Y endstream endobj 119 0 obj << /Length 174 /Filter /FlateDecode >> stream xÚ]Í1 Â@ÐR¦Ù˜¹€nvƒA"Ħ´²+µ´P,$îÑö(!¥E n¢…ÈðŠÿ1±ÄCŽXs_±q¬x¯èD:qeıþ,»#eÉ5ë„äÜÕ$‹_Î×Él9eE2çâhKEÎ ÐTðŸ76…0=„Ý Ó¦AËÞúGÓ4À¿ò+ìÄ áYa|#¬ïþxÕx¼€Ð‚f­è •›8¾ endstream endobj 120 0 obj << /Length 131 /Filter /FlateDecode >> stream xÚ32Ô35U0P0b#3s…C®B.# ßÄI$çr9yré‡+™pé{E¹ô=}JŠJS¹ôœ ¹ô]¢*c¹<]˜?0ðÿa°“c¨±gøaÏø„˜ð0?`ÀƒþÿDøÕØ?0Ôÿo".WO®@.y?B endstream endobj 121 0 obj << /Length 199 /Filter /FlateDecode >> stream xÚmË= Â@à' ƒ H2Ð$®Á&ðL!he!Vji¡(¤H‘£ ^Do°åV®#*ZX|3Û´;=Ž¸Ã­8aÝeñ&¦=éXÒˆ»É{µÞÑ §pÁ:¦p"9…ù”‡Ó–ÂÁlÈ’Žx)O+ÊGŒ îr¯ßœ¯ªÒ÷ª² §Wýï3É=TUÀ»4ÜÍ hâËðœʹ³tãCÙ°PZa€ô×3+eßGͨ_Õ5¨jîd4ÎiNÉñB@ endstream endobj 122 0 obj << /Length 146 /Filter /FlateDecode >> stream xÚ3¶Ô3´T0P0bcs3…C®B.c ßÄI$çr9yré‡+›pé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]Ø000ü‡ü ‚"â8ü¿ý8óö{ŒØ€ˆ ? Ä8q€VÿÆ? ÿÿÉÿ?&¸\=¹¹ þZP endstream endobj 123 0 obj << /Length 118 /Filter /FlateDecode >> stream xÚ3´Ô3µT0P0bCs3…C®B.C ßÄI$çr9yré‡+špé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]ØøÈ?€@þì h€ýÀ Êùüÿóÿ—«'W =!45 endstream endobj 124 0 obj << /Length 186 /Filter /FlateDecode >> stream xÚ¥Í1 Â0àH‡ÀÌß LÛHur¨Ì èä Nê(¨è*=ZÁÁÑ#˜#t¬PZß+Ô 8äƒ÷¿ä Bƒ>†tL„Ã÷œÁpæóÈ‹Ýb zÆ€žS Ú.ðz¹@ÇË) ÜèoÁ&(3!DýC1òúU‹Ê J‰”èò¶ÇŒ™ãˆ½“OæÅã;'>L^®ä{wfÒÔ7pŸÊÚzÏ1ü[§Êy¥¨+Y§ 0³°‚/‰ÚQ endstream endobj 125 0 obj << /Length 105 /Filter /FlateDecode >> stream xÚ3´Ô3µT0P0bC 3…C®B.CS ßÄI$çr9yré‡+šré{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]ØüBþìT…üþÿûÿËÕ“+ "Â: endstream endobj 126 0 obj << /Length 164 /Filter /FlateDecode >> stream xÚ3µÐ3·P0P0bS c…C®B.SS ßÄI$çr9yré‡+˜šré{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]ØÔ3°`øøÿû? 
ÿ7¨°gà?Çøã|û†ŸøØ0üoà?TTz€ýCu€¡fà(þ ÿøüÿÇÿH@).WO®@.š`\ endstream endobj 127 0 obj << /Length 137 /Filter /FlateDecode >> stream xÚ3¶Ô3´T0P0bcsc…C®B.c ßÄI$çr9yré‡+›pé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]Øüc`øøÃÿãÌøï1~``ÿ">0ü€àÄZüÿ0üÿ'ÿÿ˜àrõä 䉴MË endstream endobj 128 0 obj << /Length 157 /Filter /FlateDecode >> stream xÚ36Õ32W0P0RÐ5T06V06TH1ä*ä26PAc#ˆLr.—“'—~¸‚±—¾P˜KßÓW¡¤¨4•Kß)ÀYÁKßE!ÚPÁ –ËÓEAþCýÆv ÌøØØø,yæv`QÆ5 ?`øÃB¦¦dó7°`?ÀßÙ ²›ËÕ“+ ×v?X endstream endobj 129 0 obj << /Length 186 /Filter /FlateDecode >> stream xÚÎ= Â0ð'·ô}Ð$¦ ],Ô ftr'utPtéÑêä¦pÈ:v( Ô¡›Ëoø¿OeL‚F4ŠÔ˜” ½ÄªÈ¥‚"Õ–vGL5ò5©ùÜåÈõ‚.çëyºœ’DžÑF’آΈ €}Úì»o øôJ`ÔÀJ¸30V@â óAñaÑKÚ}Ư/áæ©€ÕþîË¥ÌÿÂ5ø¾ÿ ŒÃ6?p¦q…_³Qn endstream endobj 130 0 obj << /Length 186 /Filter /FlateDecode >> stream xÚα Â0à  7ØÕAè½€&i í µ‚ÄI…Nµ–Gé#tÌ jRêàæò ÿ÷ŸJ&‘"A¥"•t”xAÛTP¬úÑáŒY|K*F¾´9òbE·ëý„<[ÏI"Ïi'Iì±È ‚fð0)03JÁk%øÚ/!¨=aÍ ÌàKÐór<ÿ¤ú¥;v¸Ž@ûÆõ`íÔØ_ZK¨-nù˜í‚ð­;pQà? S endstream endobj 131 0 obj << /Length 124 /Filter /FlateDecode >> stream xÚ32×3±T0P0a3c…C®B.#c ßÄI$çr9yré‡+sé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]Ø?üoÿó^ÿŒ:öuìêØ?ذ?`€¢$#þ ÿÿƒ—«'W  ¾1R endstream endobj 132 0 obj << /Length 169 /Filter /FlateDecode >> stream xÚ%É; 1Fá?•pÛ©œ»Í$:…(ø§´²+µ´P¦ÒmYf)‚H™"x _uŽô*Ë[î¶}.  É–)\ÚÿÙŸhR“Þ°-I/R&]/ùz¹IOVS6¤g¼5\쨞±úd-yvT"4h<ªŸ, È"2cA.®-^I@¡¢ÃÝa”¼ÐIüOÀ0‰hDù·8' yMkúúÙ;¥ endstream endobj 133 0 obj << /Length 138 /Filter /FlateDecode >> stream xÚ32×31V0P0TÐ5T02V01TH1ä*ä22 (˜Ad’s¹œ<¹ôÃŒ ¹ô=€Â\úž¾ %E¥©\úNÎ @Q…h žX.O†zEŒ˜Áˆýƒü†ÿÿ?3L8$ Y0~0`üPÀð#Áþ—«'W E‚;G endstream endobj 134 0 obj << /Length 139 /Filter /FlateDecode >> stream xÚ3¶Ô3´T0P0RÐ5T06W06PH1ä*ä26 (Bd’s¹œ<¹ôÃŒM¸ô=€Â\úž¾ %E¥©\úNÎ @¾‹B´¡‚A,—§‹û† ÿÈC þŒ?Àb4"Q æ ÌØA߯2ÿ0Ôÿò~0p¹zrrµMs endstream endobj 135 0 obj << /Length 167 /Filter /FlateDecode >> stream xÚ36×32V0P0TÐ5T06S0²TH1ä*ä26 (Ce’s¹œ<¹ôÃŒM¸ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. ÿÿ0þ?&ø0Ô1°?`¨ Ì @Dã†Áðƒá2QÃØ%옠„=;gƒü=pâ`¿#˜? Œ`È`.WO®@._x3× endstream endobj 136 0 obj << /Length 195 /Filter /FlateDecode >> stream xÚ35г4Q0P0TÐ5T05P0²TH1ä*ä2± (Ce’s¹œ<¹ôÃL,¸ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. 
ÿÿØÿcÿ!ù0`üÁ€ùÃöŒÉ ˜â `$ûF ([c”üÁWÃÜÀðƒÇ†™JÖHȱ!Hy igÀσDö00È'°Ï@"o00-þ€J²7B ?ÀÀåêÉÈqzGx endstream endobj 137 0 obj << /Length 189 /Filter /FlateDecode >> stream xÚÍ= Â0ðb/ ôÀX#~,ª‚ÄIÝ<’£cŽÒ#dì Ôò#ïÛ{'}ÉðÜHS9f|e7DÜa,Î\”l·â†l—Ȳ-Wr¿=Nl‹õL2¶sÙ¡sÏå\š¯i^’ ¿¤ƒ ¤‚ò¤jý"ª5=[ ù€ä LtSøÕ1i=A;ÝLR¬º$ 2`D{ÈxUSZánLãÿð¢ä ÿÇ;6 endstream endobj 138 0 obj << /Length 204 /Filter /FlateDecode >> stream xÚMÌ= Â@àY B. 8ÐÍfƒ?‚?` A+ µ¢(Xñâ™<†eŽ`iÐY“ˆÅ|Å{Ì3ͺoÈ#M5iߦµÆš€S“W«-öBT32ªç¨Â1öÇ ªÞ¤OՀ暼†z§â}ÿâ&p'ØÒ™€ò ˆ',3à÷œT0±¸œ%Ó‘S-ý¸1îµÀyðê?")°Ãv8C~á;YÒÒ²¥b‹EL׎¸ö‡!NñW> endstream endobj 139 0 obj << /Length 176 /Filter /FlateDecode >> stream xÚÊ; Â@…á3L¸M–»'„@Œ` A+ ÔRPÑ6ÎÒ²”,!eŠà8“âƒûø³d‘åsêœ.ùšÐ“R‹ýê—;U ©#§9©­»’jvü~}n¤ªýšR5ŸŽÏÔÔ\Zkf(Í % ´ˆ1á!&h1BË:è¡Ã; 2pðuZgFŒÂHo&è¥ ;i¢NºTº4pYˆŸ›¬µ3Ú4t ?»"6 endstream endobj 140 0 obj << /Length 163 /Filter /FlateDecode >> stream xÚ36Õ32W0P0VÐ5T06R01QH1ä*ä2² (˜˜Bd’s¹œ<¹ôÃŒ,¸ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. öþ``~PÃÀ~ÀŽ¿AžåØ@Ø,Ê~ ¬ÿa`þ@ F6«ŽýÌ.º¢ä{@"  w‚ÜËåêÉÈ•ÑQ7 endstream endobj 141 0 obj << /Length 116 /Filter /FlateDecode >> stream xÚ36Õ32W0P0bc…C®B.#c ÌI$çr9yré‡+sé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]xlj˜ÿ0üÿÃð§ĦúÇPÿÿq¹zrrí-É endstream endobj 142 0 obj << /Length 201 /Filter /FlateDecode >> stream xÚUαJÄ@à_,ƒ˜8îæt³IŽKåBŒ` A+ ïʃSÎîÐ}´–f˜l}n4ÑL¯¬f©æ¹n­|HZ°˜h¾Éû^ÊZ̳¦…˜{–ÅÔzüüÚ‰)oÕŠ©ôÅjò*u¥p ÎÆQ3àßXÀÑÊ;¬š™ùü#ðpC'êós0 äs/‹ñ¤ ¢nRЉ~ÿ‰ÛžÞØà\Áù%÷D~˜ïÄO¹«åIþ Cî endstream endobj 143 0 obj << /Length 169 /Filter /FlateDecode >> stream xÚ}ͱ Â@ à”B‡P:wÒ> stream xÚ…Ì= Â@à,Ct/ ì\@7b* À‚V"j)¨(XˆÙ£å(9‚e 1ŽØˆÅÃÌ›uÚa—}ޏpró& …‰,}Ž;ŸËzGý”̜ÄÌXÖdÒ ŸŽç-™þtÀ™!/ö—”ÙƒcU®­ª*¨ê)â.rx7ÀÃZrY‰zq…² Ú6dòàæ€[|ÔÞîß.âñ£”l!Vò»‡ÊéiB£\[©¤&³ QJ3zÄ@à endstream endobj 145 0 obj << /Length 217 /Filter /FlateDecode >> stream xÚUŽ=j1…ߢB0n°š ÄÚ?‹l¼E ®\˜T±Ë 6¤p!ðÅÔù{„-·0(ãu“ >4Ì›÷fÊé¤xæŒK~ʹ,¸ªxŸÓ3if\MÊÇ'-Zr[.fäÖÒ&×¾òñût ·x[rNnÅ»œ³wjW €ØCughŸÂÈKÇJû*üe>¢C#š ÆÛÎÀâ¹ÕÐq€Šd Í ÿ ê¡/Ñ[Ä¢‘ {ßÒý@õ2Ý÷HüCœ¢'2—Œ;ïwoÄU#‘;%ôÒÒ†~-¢MÜ endstream endobj 146 0 obj << /Length 210 /Filter /FlateDecode >> stream xÚ]޽jQ…OØâÂÙì¼€Þý%BÄ f·$•…X%)- 
"÷Ñ|‘}„[É:‹$Å7 gΙ™¬¤ÏsÆý„³”óœ¿ZS:T1漸N>W4©ÈÎ9’©L¶zãÍÏö›ìäý•²%/Ž—T• çV4;˜zŒÐGˆ¢ëÆ0þ ê^9Ã4guÝ!ÿQŸnh^t„ˆÄø š“ = \ÐæúJX?éM§·~àô‡‘Öz> /FirstChar 29 /LastChar 122 /Widths 147 0 R /Encoding 148 0 R /CharProcs 149 0 R >> endobj 147 0 obj [39.19 0 0 0 0 0 0 0 0 54.86 0 27.43 27.43 0 0 19.59 23.51 19.59 35.27 35.27 35.27 0 35.27 35.27 35.27 0 0 35.27 35.27 19.59 0 0 54.86 0 33.31 0 52.83 49.93 50.94 53.85 47.97 46.01 55.33 0 25.4 36.21 54.79 44.05 64.59 52.83 54.86 47.97 0 51.89 39.19 50.94 0 52.83 72.42 0 0 0 0 0 0 0 54.86 0 35.27 39.19 31.35 39.19 31.35 21.55 35.27 39.19 19.59 0 37.23 19.59 58.78 39.19 35.27 39.19 37.23 27.49 27.82 27.43 39.19 37.23 50.94 37.23 37.23 31.35 ] endobj 148 0 obj << /Type /Encoding /Differences [29/a29 30/.notdef 38/a38 39/.notdef 40/a40/a41 42/.notdef 44/a44/a45/a46/a47/a48/a49 50/.notdef 51/a51/a52/a53 54/.notdef 56/a56/a57/a58 59/.notdef 61/a61 62/.notdef 63/a63 64/.notdef 65/a65/a66/a67/a68/a69/a70/a71 72/.notdef 73/a73/a74/a75/a76/a77/a78/a79/a80 81/.notdef 82/a82/a83/a84 85/.notdef 86/a86/a87 88/.notdef 95/a95 96/.notdef 97/a97/a98/a99/a100/a101/a102/a103/a104/a105 106/.notdef 107/a107/a108/a109/a110/a111/a112/a113/a114/a115/a116/a117/a118/a119/a120/a121/a122] >> endobj 149 0 obj << /a29 92 0 R /a38 93 0 R /a40 83 0 R /a41 84 0 R /a44 86 0 R /a45 91 0 R /a46 87 0 R /a47 88 0 R /a48 140 0 R /a49 141 0 R /a51 142 0 R /a52 143 0 R /a53 144 0 R /a56 145 0 R /a57 146 0 R /a58 89 0 R /a61 90 0 R /a63 94 0 R /a65 95 0 R /a66 96 0 R /a67 97 0 R /a68 98 0 R /a69 99 0 R /a70 100 0 R /a71 101 0 R /a73 102 0 R /a74 103 0 R /a75 104 0 R /a76 105 0 R /a77 106 0 R /a78 107 0 R /a79 108 0 R /a80 109 0 R /a82 110 0 R /a83 111 0 R /a84 112 0 R /a86 113 0 R /a87 114 0 R /a95 85 0 R /a97 115 0 R /a98 116 0 R /a99 117 0 R /a100 118 0 R /a101 119 0 R /a102 120 0 R /a103 121 0 R /a104 122 0 R /a105 123 0 R /a107 124 0 R /a108 125 0 R /a109 126 0 R /a110 127 0 R /a111 128 0 R /a112 129 0 R /a113 130 0 R /a114 131 0 R /a115 132 0 R /a116 133 0 R /a117 134 0 R 
/a118 135 0 R /a119 136 0 R /a120 137 0 R /a121 138 0 R /a122 139 0 R >> endobj 150 0 obj << /Length 114 /Filter /FlateDecode >> stream xÚ36Ð31Q0P0b#Scc…C®B.C ßÄI$çr9yré‡+Zpé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]00>``ÀðÿÃO0›ZˆùÃÿÿ€ˆËÕ“+ °V4q endstream endobj 151 0 obj << /Length 171 /Filter /FlateDecode >> stream xÚɱ Â0à+ ?…®Dþ'0iZÐM¨ ftr¡ Ž‚Š‚CÁ>Z¥Ð1C 6ÂwÃÝårZ,ÙGÍ8Ïù’у”ºôÕ畚ĕ"±Vz˯çûJ¢Ü-9#±âcÆòDzÅEäšqÒʰ«‚¾‚10ö¯þ –°ðŒô;„-¢ 0æÀè´qØÄ#¤1Ò陯3ÎõZkÚÓg‚6T endstream endobj 152 0 obj << /Length 170 /Filter /FlateDecode >> stream xÚ-Ë1 Â0Æñ¯t{½€”wMÒtè”B­`A'utP:ÏÖ£ô3j‚Âoxü?ž–Ë> stream xÚMŒ1 Â@Dÿ'`ࣽ…Â?»›ÝB+!Fp A«b!j)¨èr4o2ÞÀÄ$X8¼)fÆÛIjÕ7ΦꃞœÜ$k;ÛÆv8^$bJÍ‚˜UÓŠ‰k}ÜŸg1ùf¡NL¡;§v/±P"w$ ´cƒf :€Þ ¸BR!eô}ƈ1dXÆ•FÝé÷óOò¡yÝ Ë([ù=í9÷ endstream endobj 154 0 obj << /Length 166 /Filter /FlateDecode >> stream xÚUɽ Â0@á wr,(Í}Í/è\ì èä ‚ Ž‚ŠB‡}´>J!c&cPá›Î1rl-I²4R¤'d ÞPë%û=Ç –Š-ib™2ŠjEûóŒ¢\ÏH¡˜ÓN‘Üc5§>È‚y-ÁÀCÿRL«× ™ËÁçê&‘ÐÔЄŸÚC8€¿27e®ÈÚ, €‹ 7øÔë21 endstream endobj 155 0 obj << /Length 171 /Filter /FlateDecode >> stream xÚ=̱ ‚PÅñ# ·¸6Èí{º^u¸ÃÌ ‡ ¦†hªÆ ¢ÀAÐÞÌGñ‹Âº¿éàDÁ(Ž9àˆ‡ŠCÍQÄ[EG ?Ñæø»lö”d$—Æ$§6“Ìf|>]v$“ù˜É”WŠƒ5e)ëQ¶N«{uß­Œ‡B ð¡ `@n•õýÕ<ª¶ƒî`žÖ ƒ¿üÇ@X>Œ€öpp¯ó²ÿ IF zË1/ endstream endobj 156 0 obj << /Length 156 /Filter /FlateDecode >> stream xÚ36Ð31Q0P0QÐ5T0²T06QH1ä*ä2 (›Bd’s¹œ<¹ôÃŒL¸ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž.  6 öÿÿQýÿõÿÿ$0È$0X$0`xp€áãæv6>0²£ ú€@ŒØÃ.WO®@.}Ñ/Š endstream endobj 30 0 obj << /Type /Font /Subtype /Type3 /Name /F37 /FontMatrix [0.02007 0 0 0.02007 0 0] /FontBBox [ 3 -1 29 34 ] /Resources << /ProcSet [ /PDF /ImageB ] >> /FirstChar 49 /LastChar 55 /Widths 157 0 R /Encoding 158 0 R /CharProcs 159 0 R >> endobj 157 0 obj [30.44 30.44 30.44 30.44 30.44 30.44 30.44 ] endobj 158 0 obj << /Type /Encoding /Differences [49/a49/a50/a51/a52/a53/a54/a55] >> endobj 159 0 obj << /a49 150 0 R /a50 151 0 R /a51 152 0 R /a52 153 0 R /a53 154 0 R /a54 155 0 R /a55 156 0 R >> endobj 160 0 obj << /Length 116 /Filter /FlateDecode >> stream xÚ36Ö3°P0P0b#sc …C®B.#C ßÄI$çr9yré‡+ré{E¹ô=}JŠJS¹ôœ€¢. Ñ@-±\ž.   
Œ?ä0üûÁðàmã†ÿÿ—«'W ;> endstream endobj 161 0 obj << /Length 184 /Filter /FlateDecode >> stream xÚMÌ= ÂPà …ER[î |/ BlüSZYˆ ¨¥ ¢`4Gó(9‚¥…øœ …,_±3ËÆqË&j5¦¨£q¢›Pµ¹Ûr-‹õNú©˜¹Fm1c¦bÒ‰žŽç­˜þt ¡˜¡.BµKI‡ ø®@à?PÇV¸â‚^ôF3ÿ—¾ö@—®”Ñð|XШò,`Ü „ö^ŽÂËËñá£B5j:‡ÝÜîGF©Ìäœ_8Ÿ endstream endobj 162 0 obj << /Length 187 /Filter /FlateDecode >> stream xÚUŽ= A …ß`!¤ñÂæ:³;,¬ þ€[ZYˆ•Z * "Í£ì,-Äõ bñòå%Äû¦ËÔ©×F¬IK}¦«Xv’¤”Ô­Ïd¹‘^.v¦I*vD-6ëa\‹íMú‹è> stream xÚuÍ; ÂPÐ C°¶H1+ðýŠÄJˆ|… •…µT,]šKÉ2RÏ›h+Ì)î|çÆº-j"ö9r»ÁþÄ¥gµ›³Z ËÊ/åv½Y•«™V•lèûJˆ úz@KýÄÀRÈ`ÎP륨ól)ÆmCH!ƒ4ÔÐ@¡éôþ‰ÞDSìÏ=¯ùoBÑ endstream endobj 164 0 obj << /Length 180 /Filter /FlateDecode >> stream xÚ}α ‚`àn— Õ@ò¾@ýÿ¯ Ñ`955DTcPQÐdoÖ£øŽ’]‰†–†o9÷À¹¾ß×!kö¹gØ‹ØykèH^ ¡ÄÑç²ÙSœ’Z°šHL*òùtÙ‘Šg#6¤^Ö+J¶Ñ‚‹ܺ…ÈE†NØøãtšî£@W8H>”KÃýÊZÈBVý‚¬a-ýƒÐòƒƒm+‡U?@ã”æôä3 endstream endobj 165 0 obj << /Length 191 /Filter /FlateDecode >> stream xÚ]Í1 ÂP à_…,^@Ú\@__‹P¡¼BU°ƒ “ƒ8©£ƒ¢à Ø£Õ›ô;TkT‘ðeø“Ïë9>;ìqW³;`Ïç¦=¹} %|&ëE1©»}R‰IÅS>N[RÑlÈšÔˆ—šÅ#F;Â(:¨eMÔS­$D!la #ݸ|5D˜¢¬rÜÓ3*Âà!{¹z)a'¿.Ù1Âz—‘ü=£~ËQ«Rà Ð8¦9=ò8Å endstream endobj 166 0 obj << /Length 165 /Filter /FlateDecode >> stream xÚ36Ö3°P0P0VÐ5T0"K…C®B.#s  ‚ T&9—ËÉ“K?\ÁÈœKß(Ì¥ïé«PRTšÊ¥ïà¬`È¥ï¢m¨`Ëåé¢`ÀÀÀ`Äöÿÿ?€âõÿÿ7€p;Có0`b .ª~ƹ‚ÙBü@ÌÄrHØj× á?p—«'W ðÀ4Ö endstream endobj 29 0 obj << /Type /Font /Subtype /Type3 /Name /F35 /FontMatrix [0.01721 0 0 0.01721 0 0] /FontBBox [ 2 -1 31 39 ] /Resources << /ProcSet [ /PDF /ImageB ] >> /FirstChar 49 /LastChar 55 /Widths 167 0 R /Encoding 168 0 R /CharProcs 169 0 R >> endobj 167 0 obj [33.08 33.08 33.08 33.08 33.08 33.08 33.08 ] endobj 168 0 obj << /Type /Encoding /Differences [49/a49/a50/a51/a52/a53/a54/a55] >> endobj 169 0 obj << /a49 160 0 R /a50 161 0 R /a51 162 0 R /a52 163 0 R /a53 164 0 R /a54 165 0 R /a55 166 0 R >> endobj 170 0 obj << /Length 192 /Filter /FlateDecode >> stream xÚ…Ž1‚PD‡PlÃØ èÄŠ1‘ÂD+ c¥–m…£q@IAˆû;“WÍÎÎL0›† vÙ xólÎaÌgnäû¢ºEãét¥4'µgß'µT¾áÇýy!•n—ì‘Êøà±{¤> stream xÚ…O; ÂP±lãÜ è{IüÄ* L!he!Vj)¨h-GÉ,-$q̃´ÂT;ß…ÃñL­NuihuéÉ—›V'Ç/2OÅì4Ĭx“®õqžÅÌ7 õÅ$º÷Õ$Mô |€ ¨,G\ 
WÂ{¡ûFÇ9úé^Ù€"J[|š¼ ¬µÐîrè’YÁ"Ö±4nT?…”pGrjݬc_e*[ù«ËM* endstream endobj 172 0 obj << /Length 114 /Filter /FlateDecode >> stream xÚ31Ö3µT0P04WÐ5W01T0µPH1ä*ä22Š(˜™B¥’s¹œ<¹ôÃŒŒ¹ô=€â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. õÿÿüÿÿ†þüa`üè?’›îçrõä ä—5ez endstream endobj 173 0 obj << /Length 116 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0VÐ5W02W0µPH1ä*ä22 (˜™Bd’s¹œ<¹ôÃŒŒ¹ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. õÿÿüÿÿ‚êÿÿc`¨ü¨æ`°›ÿp¹zrrléI endstream endobj 174 0 obj << /Length 175 /Filter /FlateDecode >> stream xÚµ± Â0DQXúKä'2Ò† á * D” ¨Ãh%#¤¤Âü#6HáWÜYòóMíÄÈà0žÃp œsº‘µf˜¹Øœ®Tz2{XKfÍ1¿Áãþ¼)·Käd*rdGò”R/¥RA-œ%¡a|¸½ݠЂ´V$‘Q¬ùµñžî†·êÞoÄ×e«ú¿U¿ïG+O;ú‚a endstream endobj 175 0 obj << /Length 171 /Filter /FlateDecode >> stream xÚµ± Â0EQ Ýù €miCp¢ ” ¨“Ñ…(©0¾ó i~ñϧ{~37õ <& ¸ ~‰³¥9—Jƒ¹Ï“öJu }€s¤7©&¶xÜŸÒõnKºÁÑœ(4è^J©øåøqÄ^©.JùNQrŒ?)F#ŒPäëQ1H¢)3RŸ;™Ê;Ù˜J~.؆xCÙˆ?ZÚÓOYbÍ endstream endobj 176 0 obj << /Length 104 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0UеP0¶TÐ5RH1ä*ä26 (˜A$’s¹œ<¹ôÃŒ¹ô≠ô=}JŠJS¹ôœ ¹ô]¢  b¹<]êÿÿÿÏÄÿа—«'W *› endstream endobj 177 0 obj << /Length 171 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0S0W0¶P01VH1ä*ä26Š(›%’s¹œ<¹ôÃŒ ¹ô=€¢\úž¾ %E¥©\úNÎ @Q…h –X.OæöX±ûŽììþ±ø÷Ÿýà¿ÿÇÿûÿüü?ûÿÿðÿÿÿ€ùÿÿÆÿÿêÿ€1ˆ ÉÔ€Ô‚õõ‚Ì™2—} ·p¹zrr«xSº endstream endobj 178 0 obj << /Length 136 /Filter /FlateDecode >> stream xÚ31Ö3µT0P04UÐ54R0² R ¹ ¹ M€Â FÆ0¹ä\.'O.ýpC.} —¾§¯BIQi*—¾S€³‚!—¾‹B´¡‚A,—§‹ƒüûõ?€ðÚÿ‘ÿÃÿ‡áÆŒ?˜?°PààP—«'W ŸÒ,5 endstream endobj 179 0 obj << /Length 99 /Filter /FlateDecode >> stream xÚ31Ö3µT0P04F †† )†\…\@Ú$l‘IÎåròäÒ pé{€IO_…’¢ÒT.}§g ßE!¨'–ËÓEAžÁ¾¡þÀÿ0XÀ¾AžËÕ“+ ‰;“ endstream endobj 180 0 obj << /Length 157 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0UÐ5W0¶T0µPH1ä*ä26 (˜™Bd’s¹œ<¹ôÃŒ¹ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. 
ì@ÌÀß#äÁHÌD؈:Q'þ€ˆ@Ì&> f0ñd˜82î>3Ñ dfâ ¸™¢Dp¹zrr@Ä:Õ endstream endobj 181 0 obj << /Length 107 /Filter /FlateDecode >> stream xÚ31Ö3µT0P04F Æf )†\…\††@¾ˆ –IÎåròäÒW04äÒ÷ sé{ú*”•¦ré;8+E]¢zb¹<]äìêüƒõìäðì:¸\=¹¹{-= endstream endobj 182 0 obj << /Length 155 /Filter /FlateDecode >> stream xÚ31Ö3µT0P04UÐ54R06P06SH1ä*ä24 (˜XÀä’s¹œ<¹ôà M¸ô=€\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. ü òìÔ€Aûòøð Žöêá´ÿ#ÿ‡ÿÆ ?0`ÿ ÿ þÀÿ†ÿ@¡.WO®@.…8 endstream endobj 183 0 obj << /Length 110 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0V04S01T06QH1ä*ä26 (Z@d’s¹œ<¹ôÌ͹ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. õÿÿÿÿÄÿ °‘§\®ž\\ºâAŠ endstream endobj 184 0 obj << /Length 184 /Filter /FlateDecode >> stream xÚmÉ=‚` à’.žÀ߉1‘ÁD'㤎]…Ä‹‘8p n #¡~ $(}úö­ëL<ŸL²å¸6y6í-<¡Óvf{¶ÝÃÅšÅ\¶(â]Î׊p9% ED‹Ì-Æ4 ð•Óžgö&ëÉ{ô¼øâ!1îå¥qƒú?µ\ÀÜ P˜ùCÁµ#ýA“dZz–4Àu ×,iºÔu8‹q…/ÂaoM endstream endobj 185 0 obj << /Length 190 /Filter /FlateDecode >> stream xÚ}±‚0†K:˜ÜÂ#pO`iÀ‰1±ƒ‰NÆI4º æ£ðõ®ØîKÿëÝùÓd¹Ê0FM•j\i¼jx@½˜%\îPPGL2P[ê‚2;|=ß7PÅ~¤K<ÑäL‰•s ´Â9×óËy|¥9#l K#‚vÓœ_ó[¹Z²½äC„N Ò_‹¦C£•èFôŒÏ,úa8è—‘[NÔøXT®®þQ­€ü÷âŠÝ endstream endobj 186 0 obj << /Length 183 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0bCSC…C®B.c ßÄI$çr9yré‡+[pé{E¹ô=}JŠJS¹ôœ€|…hCƒX.O…úÿÿþÿÿD|?€bØ0ÿ ÿAD}°ò€ÿÁ&> f0ñH0b!þO ¶ƒn%Ørv¸ƒÀî³?sóˆ?À>û æË `Ÿs¹zrríÇG endstream endobj 187 0 obj << /Length 147 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0b#SC…C®B.c˜ˆ ’HÎåròäÒW0¶äÒ÷Šré{ú*”•¦ré;8+ù. ц ±\ž. õÿÿÿÿÄÿ Øæ Œ„ † ‚ƒ`|$€lthv›bˆ)ØŒ‡6 ¢Žä£ÿQ Ø.WO®@.ÌŒ‡r endstream endobj 188 0 obj << /Length 227 /Filter /FlateDecode >> stream xÚÐ=NÃ@à±\¬4๬¥PY AÂT(PR$‚ÖÞŽkÍ ¸7eŠU†ÙI"QÒ|Åìß{;—Ý5袥ùŒº½´¸Á°ÐaC]8®<¿ár@ÿHaþVÇè‡;zß~¼¢_Þ_S‹~EO-5kVE*#TòÉPËŽaa¥'\¦BÙƒ°û‰«oè¹Ò\Qéõ4÷pf<á¢`2éß”²Oà$‡Ì˜gãßëíµúD> stream xÚ31Ö3µT0P0b#SC…C®B.c˜ˆ ’HÎåròäÒW0¶äÒ÷Šré{ú*”•¦ré;8+ù. ц ±\ž. 
õÿþÿùÿŸñÿ?cÀÀ€êÄÿÿÿ±4± Nàô%—«'W žˆ‡ä endstream endobj 190 0 obj << /Length 108 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0bc SC…C®B.crAɹ\Nž\úá Æ\ú@Q.}O_…’¢ÒT.}§g ßE!ÚPÁ –ËÓE¡þÿÿÿÿÿÿà >ÿ†Áޱ¹›ËÕ“+ H¨X~ endstream endobj 191 0 obj << /Length 123 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0bCSC…C®B.cs ßÄI$çr9yré‡+›sé{E¹ô=}JŠJS¹ôœ€|…hCƒX.O…úÿþÿÿ€L€Å˜ŒÁN|Œ?ˆ êÿÿÿÿã?*ûÀåêÉÈé f’ endstream endobj 192 0 obj << /Length 170 /Filter /FlateDecode >> stream xÚÅ1 Â@ERÓx„Ìt³Ž)R-Än!he!VÆÒBÑÖä¨9‚¥EØq™Š†Wüßü7sžæe”ÓÄ”Ϩ¶xAæƘ‡æxÆÒ£Ù3šUŒÑø5Ý®÷šr³ ‹¦¢½¥ì€¾"h é`,ò‚T¤'ÀuID ˆ§x¸/„ˆ¶Hÿ ¡øÙ÷®î9 ƒ›Zª¯šëpéq‹o¡lª endstream endobj 193 0 obj << /Length 197 /Filter /FlateDecode >> stream xڕСÂ0à›jrfÐ{Ø::"#a‚‚ ‰€€îÞ e0‰XvtmC‚ùÄßöîOõh˜Ž)¦„Š´¦TÑ^á µ²aLiâOvGÌ ŒÖ¤FscT,èr¾0Ê–S²iNûf‹EN†`æÒY9†»Q‰¶3p‚qNÊNÙ3¼ÿ¶ßO0ïÉn‹ßè¶ ×ÄZ¿’J4½&}þ5tÊò›¦y+™A²ý ½-ؼ+Ô€³Wø2>z endstream endobj 194 0 obj << /Length 236 /Filter /FlateDecode >> stream xÚu1NÄ@ E½Ú"’›a|˜„$ÕHË"‘ * D”H»$*â£å\!GØ2HQÌw€‰æÉãÿmÿ©«ãæT ©å¨”ºæDJÞsÕ ‰gõ­Ü?ñ¦åx#UÃñmŽí¥¼<¿>rÜ\IÉq+·¥wÜn…˜™åº2ûÐÌÌ4w„C0Mý€¤LúNÔéL”túAø ¨9ÁçÒ„Éa=tC¹6”8y€ÇF¢Ì›Ôa¥OÚ2éý/òaÁ<Ãô&ÄØùE>oùš¿åxv endstream endobj 195 0 obj << /Length 124 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0b#SC…C®B.c˜ˆ ’HÎåròäÒW0¶äÒ÷Šré{ú*”•¦ré;8+ù. ц ±\ž. 
õÿÿÿÿÄÿÿ¡êêð@†H0 zÂþÿ(Qÿÿ—ËÕ“+ +òT¬ endstream endobj 196 0 obj << /Length 167 /Filter /FlateDecode >> stream xÚÕË1‚@…áG(L¦áÌtYY +ÄD ­,Œ•ZZh´†£qŽ@IaGhôf'_ñϬ‹gÉ‚#}SËÎqbùléF.b27§+e™=»˜ÌZ3™bÃûóB&Û.Ù’Éù`9:R‘s)U*µH]JóíØý^‡¿w˜ŸøÂ¤Ôè¨%ÂH«´RQCôª/ê‰~ú´*hGo8‚˜ endstream endobj 197 0 obj << /Length 191 /Filter /FlateDecode >> stream xÚmÌ= Â@à Óx„¸ ‰‚Õ‚?` A+ ±RK E[“›™£ä)S,;Îh%Xìûfæùh<¥” }å:exÅ\³T¿:8^pV¢ÝQ>E»’m¹¦ûíqF;ÛÌ)C» }FéËEÜ$ s­´àXBט^H”ȃ©ÁÃ@ž?|be¨®ŸàzY©E—ƒâÿðTZ_Õq×-`öRÅ!a~…ˆƒ„®K<.KÜâj/\ endstream endobj 198 0 obj << /Length 187 /Filter /FlateDecode >> stream xÚŽ= Â@…g°¦ñ™˜„Ä"•#¸… •…X©¥…¢­ÉÑr”aË€!ãN;±˜æï½GÓY‡®âg!ŸBºR¤³@[]/”òw%ä¯Ü”|³æûíq&?Ý,ØõïÝåLƹ©¿+ðx•ƒ“À—´€"Ò¡@±y‰Rx Œ-¶0ª±éþ~Ð*ž?¢uîmÖ½rç!0±ƒe¥æ] ÔEÓ`ç%ÐÒЖÞ*Åsz endstream endobj 199 0 obj << /Length 182 /Filter /FlateDecode >> stream xÚŽ1 Â@E¿¤¦Ik—9›°° Än!he!Vji¡h›äh%G°L2ΦÐÖ…}ðgÙ?of§óÇœêÅlS>'t#k5Ñ?œ®”;2{¶–ÌZ§d܆÷ç…L¾]rB¦àCÂñ‘\Á¤"iJzŒDˆÆ=á[5/”ÈjLAOåQ~Ñý‰ß¡@«B_ÕZ¯h4èÊJ—â5¡Î«µ^RMuZ9ÚѲuEJ endstream endobj 200 0 obj << /Length 193 /Filter /FlateDecode >> stream xڕα‚@ à’.<} L— &Þ`¢“ƒqRG®â›á£øŒ—;[pqÓᾤ½´ý 5)+ÊHñ+•9ís<¡’^&¥|ìŽXLפ*LçÜÅÔ,èr¾0­—S⺡MNÙMC±€Ä  ÿ$z1Ú1Þwxï!"Ëûâ>ô<æôZ™iá&³N°?â>cíH ãRa¸ÊÉHŽ'c Ë:ÇÑ´m™¸O,Î ®ð —ºYK endstream endobj 201 0 obj << /Length 201 /Filter /FlateDecode >> stream xÚmޱŠÂPEï’âÁ4ù„ÌìKˆ¬® ›BÐÊB¬Ôr‹mM>í}ÊûËâì}VÌ™;ܹ“ú³™i©“Ô¥ÖS=Tò'uÃù9&aÿ+óNüFëFü·â»¥žO—£øùêK+ñ ÝVZî¤[(²€ÂÐÛ f#2³;܃J>ÂPD´Cˆv@Z }•ˆ„‹÷c½C  ¤7¸¾Ð'Ð* 4u‘ö.æ7ú¹mp Ìb2ræcÀòÝÉZþI÷_þ endstream endobj 202 0 obj << /Length 154 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0asSC…C®B.cßÄ1’s¹œ<¹ôÃŒ¹ô=€¢\úž¾ %E¥©\úNÎ @¾‹B´¡‚A,—§‹ÿû@âÿÆÿÿ˜AûŸz ñHð?°*;&põÿÿÿš4A€Åðk£aÿÿÿ[~ `1.WO®@.òÅ^£ endstream endobj 203 0 obj << /Length 253 /Filter /FlateDecode >> stream xÚ}±JÄ@†ÿ#E`š}!óšÄä”k.pž` A+ ±RK E»#›ÎÇðUò(y„”[,g‚²ìǰóÿÿÌÖÕÉzßòq¹áºâꜟJz¥º`;볟Öã íZÊï¸.(¿ÒwÊÛk~ûx¦|wsÁ%å{¾/¹x vÏ’€4¸ˆlnfxYé•DdöItÁ§S¶n\Å#7@efd=º`’El6X4jB*²`„éá¾fÀ}E_éh0‡íb•ôj“1SLÍ€,xÝ>v*‹Å!*:MÃö–Æ¢ó½:²?-y‰%Û§F‚Í@—-ÝÒ7ãè‚> endstream endobj 204 0 obj << /Length 161 /Filter /FlateDecode >> stream 
xÚ31Ö3µT0P0bcSC…C®B.ßÄ1’s¹œ<¹ôÃL ¹ô=€¢\úž¾ %E¥©\úNÎ @¾‹B4Pe,—§‹Bý øÿ¬“Œ‘ò@dý ùóÿ? ùûÿ ùB~°o’äAdƒü ÉÀ$ÿÉ?Häz“õÿøÿÿÇÿÿIˆ8—«'W ƒzú endstream endobj 205 0 obj << /Length 132 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0bcKS#…C®B.cC ßÄI$çr9yré‡+ré{E¹ô=}JŠJS¹ôœ€¢. Ñ@-±\ž. ì ò ØþÃÄ@òx@ýÿ@ü€á?×C1;}pýÿÿþÿÿÿ†A|.WO®@.üØO) endstream endobj 206 0 obj << /Length 198 /Filter /FlateDecode >> stream xÚÌ;‚@à%$Ópçò.¨H)L´²0Vji¡ÑV¸‰Wá(xŒ…[Æ_­Å~Éü³ó‡Á0ŠÑEŸ_ècäáÆƒ=’¹2Êb½ƒ4gA ΄Spò)§-8él„ôŒs˜ÃQ¹yÀ endstream endobj 207 0 obj << /Length 115 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0b e¨bÈUÈel䃹 ‰ä\.'O.ýpc.} (—¾§¯BIQi*—¾S€³ï¢m¨`Ëåé¢PÿÿÃÿÿ‰zÁÀ<Œˆúÿÿÿ7ñÿ,ÆåêÉÈî{\W endstream endobj 208 0 obj << /Length 171 /Filter /FlateDecode >> stream xÚ½Š= Â@…·[˜&GÈ\@7!Q°1#¸… •…X©¥…¢õ^,7ðæ[n±ì8šÎȃ÷WÃÑ3ä‚r„Å9œAl&’ø]ö'¨-˜\À,¤c—x½ÜŽ`êÕ s0 nå¹Û =œî=Cê¿bq䙣Ò1 S¥e¬”ö‰K•vI'ì’ö‡mrÿ/)Tžòì8R`ßû¾‡¹…5¼ízfÊ endstream endobj 209 0 obj << /Length 155 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0bcc3…C®B.ßÄ1’s¹œ<¹ôÃL ¹ô=€¢\úž¾ %E¥©\úNÎ @Q…h ÊX.O…úòþÿ¨ÿ$þÿ$ÿÿÏÀPÿD2þÿ`ß$ȃÈù@’Hþ“Èô&ëÿ?:ñÿÿÿÿ7 “q.WO®@.‹£ll endstream endobj 210 0 obj << /Length 183 /Filter /FlateDecode >> stream xÚ}Ž=‚@…‡XLÃvNàBL¬H·0ÑÊÂX©¥…F[Ù£íQ8¥…a†‚Îb^2ï}¹™KJ)*%³ K†w4÷Ò‹ó +‹ú@¦@½á)j»¥çãuE]íV”¡®é˜QzB[Ä_P¥ ¢:˜…ðá9o’.êAµ@9(¡dq%Ÿ»7@â'a¸ý/=ßµÓGÃ.^¬ÄTyhÆ ‰”pÁ A!\\[Üã>P: endstream endobj 211 0 obj << /Length 200 /Filter /FlateDecode >> stream xÚ¥= Â@…g°¦ñ™ èfI"¦üSZYˆ•ZZ(ښͣä[.(w“€–‚S|Åæ½7q4HRYs_8Ö ù éL‘WCNâvµ?Ñ$#µá(%µp:©lÉ×ËíHj²š²&5ã­æpGÙŒs” V,ÈS*7;(& A‰]ƒt,¾à -À•ÇýGTÎÀµ@Û8×=ÓF–>¼®á ¡¯†¾$Úñ¼Ë_È¥÷ªùF­Ñ<£5½Þ¯ì endstream endobj 212 0 obj << /Length 158 /Filter /FlateDecode >> stream xÚ­É1 Â@ПJø—ðŸÀÝu£Äj!Fp A+ ±RKAEëõh9J¼AÊÁqc!Ú[̃™Ií`4-ØԈËÞð™m»îjw쎜{Vk±«y\Yù…\/·«|9ê½e_Hx’+5ÐCôÑ8´äÂ#‚$ÒRC®¡¹šˆ\õ¡ì¸ÿBÿ"¨¿xo<ó¼âõõIw endstream endobj 213 0 obj << /Length 185 /Filter /FlateDecode >> stream xÚMË1 Â@ЋÀ4!s7q5Æ@T0… •…X©¥EÁÊÍÑrr‹ñ,,Þ2³óÿÔŽg©D’€MÅ&rŽùÆv‚=ê×þpºr^°Ù‹°Yã—M±‘Çýya“o³YÊ!–èÈÅRÈùr¨êGB®ù7 }Kïÿ´D#"×eZS¨¡W¡ÿ!§ˆ("P÷B Ca÷£}­¢9ª6A«ª=> stream xÚ31Ö3µT0P0bc 3…C®B.cS 
ßÄI$çr9yré‡+›ré{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]ä€Àž¢þÿÿÿ @ü¿A€ÅH2…‚ù`€hàÀ ß €AþAý~ [@óÿ Œÿ€LxÀÀåêÉÈþ:B„ endstream endobj 215 0 obj << /Length 148 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0bcc3…C®B.ßÄ1’s¹œ<¹ôÃL ¹ô=€¢\úž¾ %E¥©\úNÎ @Q…h ÊX.O…úÌÿþÿ`ÿ…¬ÿÁ $0ð()DÚÉ? õþÜÆðêdƒ=˜”ÿH2ÿcÿÏÀåêÉÈÄ£d> endstream endobj 216 0 obj << /Length 186 /Filter /FlateDecode >> stream xÚ5Í= Â0ÀñW:oéúN`ú¥ÐÅB­`A'qRGE7©…^Ì­×è êØ¡4¾Ø”É? ‰Âé,&žQ@áœÎ>Þ0ÔÍÓ[}pºb*Qì)ŒQ¬¹¢zÜŸévI>ŠŒ>yG”½•¥:ÅôJ•^ý›]ƒS |Á-,ZHZX:È^<rœ[CÂ×Á准’qÊz¤b&Õg¤aì¦QŒ¥À½†¿À•Äþ$›Lã endstream endobj 217 0 obj << /Length 174 /Filter /FlateDecode >> stream xÚ31Ö3µT0P0bcc3…C®B.ßÄ1’s¹œ<¹ôÃL ¹ô=€¢\úž¾ %E¥©\úNÎ @Q…h ÊX.O…úÿ `Ôðÿ?ÃÙaCÄÙ00~ @2?ÀDv`²N2~¨+þߎ ¿#Èß``’ ?Ÿ‡“¿¿G#«¾g``¨?øA6 Hû†@Rž¡†ËÕ“+ Ém¢ endstream endobj 218 0 obj << /Length 202 /Filter /FlateDecode >> stream xÚEŒ; ÂPEoH!Lãœø£‚UÀ˜BÐÊB¬ÔÒBÑN!…Û²³t î@Ë!ãL@,ÞaæÌ»·µ{¸£¯Ûá¨ÏÛ™ lµÃfOÄܒ£¹©ZrÉŒOÇóŽÜp>âܘW!kJÆ‹/ŸLnRüQ;”H¡(Ô+€Øû­Üp{Íçh¼¯€/ O ¨.†êçê«oŸk> ¹¶´¬4¶ú…¥4Wè¬&F&ž”™äRŠ¢ª§ÚÑ$¡}¨xY& endstream endobj 219 0 obj << /Length 237 /Filter /FlateDecode >> stream xÚEαjÃ@ àßdˆ‚ÁzöìØ)ÍCšB=Ò©CÉ”dÌÐÒnÆvÈÐ×jé‹:tÍ&É=Žûîî$%ñÍpÄ!ø:ºãdÀñ-¯"z¥X£!—Znh’‘yæxDæQâd²¿¿}¬ÉLæ÷‘™òKÄႲ)—Ö³µ[{²v§È­õöð+ïðOPy5À‘ Æ@®²äÌ©¤äUíð·-Gÿ[ùÙ;z¿Êßàµ[*ö‚l”ãŽBÉ;¥v\ɼHer”;åSú¾H‹R §Z88 ¾~íKôÑßÍa{ endstream endobj 220 0 obj << /Length 176 /Filter /FlateDecode >> stream xÚ}Ž1 ÂP †S2Y<‚9¯Å*B¡Vð ‚N⤎Š®­Gó(ï¤Ï¤c‡|?!?É'ãéœSžèä3>gt#Í”»Õ§+•žÜ^wrëŽ~ÃûóB®Ü.9#Wñ!ãôH¾â"Æ…ôPŒ‚¢x+š—"B I À/ >Š¡€i`˜¦$fà_£…$hŠ¡¨†¢Šj(ª¡D{£{-ÐÊÓŽ~æêb° endstream endobj 221 0 obj << /Length 203 /Filter /FlateDecode >> stream xÚ= Â@…_°L“#8ÐMLRØðL!he!Vji¡h'š£å({„”!qœ-–6ß²ó`ö}›ÄÃtÌ!'<ˆ8 9ñ1¢ Å© å»äp¦iNfËqJf)c2ùŠo×û‰Ìt=ãˆÌœw‡{ÊçŒÞ@в¶^m ´­…ו„û•W÷¨”x:ô däTLdOñ”€_Öû'¤X`–*ºw]!WÒ¢qµ½z¨‘º9KõUóïÐ"§ }}dà endstream endobj 222 0 obj << /Length 141 /Filter /FlateDecode >> stream xÚ31Ö3µT0Pac S#…C®B.# ßÄI$çr9yré‡+Ypé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]Øø XŠí¸ˆÿ7001;×ñ¾Äójä‘Ô®ÿÿÿÁÿÿÿ?À0ˆÏåêÉÈÅFJÜ endstream endobj 223 0 obj << /Length 222 /Filter /FlateDecode >> stream xÚe1N1Eÿ*…¥i|„Ì ð.›-V Ab $¨(U 
¤A›Ý£ù(>BÊÑóÓ„,?kÆÿWíEw¥µ®¸kí.õµ‘i;¯O%/¶ï²$=iÛIºó®¤á^¿>¿ß$­n´‘´ÑçFë6Šx0ڄʬ ˜íÍŽX⌾T†~ÂèËϰœfGvÄlŽâgØ×ÎOÈ —˜À<|žðHTGÇ‚+î©¥µ§Ë‡D5ÿWôTŒL3ü*Ù¡¸=·‡2šÿÐþ‚½,·ƒ<Ê8hñ endstream endobj 224 0 obj << /Length 226 /Filter /FlateDecode >> stream xÚEнNÄ0 ðÿé†J^òñ @ZÚHH•îC¢L ˆ @°Ò>ZåáÆ§úl·ÀŸDZãTåe}Í9W|Qp•s}ů}PYkP·å|òòN›–Ò#—5¥[ SjïøëóûÒæ~Ë¥?œ?S»c„€Nz¬DÈDF‘â˜Mˆ&4=:4§WâLì• «hLºVÆÚšÄQ—5Aýâ1;Í,òw×Ki üs°Ä™ãÇ…à Îdw;«Ò-¯—y"ŸÍ§\Û¼>¹ÿí[z 3áVc4 endstream endobj 225 0 obj << /Length 181 /Filter /FlateDecode >> stream xÚ•Ï=‚@à!$Ópæ.¿ bâ&ZY+µ´Ðh £pJŠëL±hë$ó%ó^5YºÌ Š(áÍʺÄxÇT²HN)Î7¬4ª¥ª §¨ô–ž×Uµ[QŒª¦cLÑ uMþÁÄ„B9ÓÌÆ›‹‘ñGÐ3aç(if ãMŽÅ( Œ/½#ì˜`Ëc„÷—V2öOZË¿Z;ý®5îñÜþtý endstream endobj 226 0 obj << /Length 207 /Filter /FlateDecode >> stream xÚ¥Î= Â@à‹À4{„Ìt³&)!à˜BÐÊB¬ÔÒBÑÖ,x¯’£xË’qFEÐÖæƒÙ}o“¸v)¢„ZŽ’ˆRGk‡;ŒSʱóÚ¬¶ØÏÑÎ)NÑŽeŒ6ŸÐaÜ íOäÐiá(Zb>$Ã\CÈÌßÈÌüǹ.ì5ïªTʺ)ñ7¢ ½œùPÐ €ù\è)'…ߘ'å-,e›ù$9óÒ‘• i«ÌŒþ `¾AƒYÒ Öš G9Îð-²c— endstream endobj 227 0 obj << /Length 241 /Filter /FlateDecode >> stream xÚmŽ1NÄ0E”"Ò4¹ž @’T––E"Th+ ¤Ø´±æ£ø)S„ ãÍ“ü=3ÿuíEÅ5w|ÞpWsÉ/ ©í5ÔgûýóüF»ªGn{ªn5¦j¸ã÷ÓÇ+U»ûkn¨ÚóSÃõ†=6™Ì@! 
`dÕHpÑë³Îç³¢˜¢¢Œ°0g0º°¿p ã†\ÏF<'Ÿ"D´MÖbLz[‚Îë€õZj6]*7DEñã?°?(£j”A…LP5ãË GÕÔ¡˜µ(O•Y*GÒ@BRƒæ ›è þ5pI endstream endobj 228 0 obj << /Length 213 /Filter /FlateDecode >> stream xÚ}O» Â@œ`q°M>!ûz‰I «€0… •…X©¥…¢­É§åSü„”Áõ²W؈p w»3s3Y:Ê'sÆÃ„³˜ó1ºPš»¡{¦~s8Ó´$»å4'»tc²åŠo×û‰ìt=ã„ìœw Ç{*ç Ó(¤Džˆ¼`D:„y#jAÔ BQ»SQ]9h@ø”¢9…׆mðÆ 3/"-PIÿoÓ™n•§ ÕªË×ÙñÍó?|ÉR3{¿¾‡6ÒnÚRûúæ}Z”´¡ëån endstream endobj 16 0 obj << /Type /Font /Subtype /Type3 /Name /F34 /FontMatrix [0.01204 0 0 0.01204 0 0] /FontBBox [ -1 -19 45 58 ] /Resources << /ProcSet [ /PDF /ImageB ] >> /FirstChar 40 /LastChar 125 /Widths 229 0 R /Encoding 230 0 R /CharProcs 231 0 R >> endobj 229 0 obj [43.59 43.59 43.59 0 43.59 0 43.59 43.59 43.59 43.59 43.59 43.59 43.59 43.59 43.59 0 43.59 0 43.59 43.59 0 43.59 0 0 0 43.59 43.59 0 43.59 43.59 0 43.59 43.59 43.59 0 0 43.59 0 0 43.59 0 0 43.59 43.59 43.59 43.59 0 0 0 0 0 43.59 0 43.59 0 43.59 0 43.59 43.59 43.59 43.59 43.59 43.59 43.59 43.59 43.59 0 43.59 43.59 43.59 43.59 43.59 43.59 0 43.59 43.59 43.59 43.59 43.59 43.59 43.59 43.59 43.59 43.59 0 43.59 ] endobj 230 0 obj << /Type /Encoding /Differences [40/a40/a41/a42 43/.notdef 44/a44 45/.notdef 46/a46/a47/a48/a49/a50/a51/a52/a53/a54 55/.notdef 56/a56 57/.notdef 58/a58/a59 60/.notdef 61/a61 62/.notdef 65/a65/a66 67/.notdef 68/a68/a69 70/.notdef 71/a71/a72/a73 74/.notdef 76/a76 77/.notdef 79/a79 80/.notdef 82/a82/a83/a84/a85 86/.notdef 91/a91 92/.notdef 93/a93 94/.notdef 95/a95 96/.notdef 97/a97/a98/a99/a100/a101/a102/a103/a104/a105 106/.notdef 107/a107/a108/a109/a110/a111/a112 113/.notdef 114/a114/a115/a116/a117/a118/a119/a120/a121/a122/a123 124/.notdef 125/a125] >> endobj 231 0 obj << /a40 170 0 R /a41 171 0 R /a42 177 0 R /a44 178 0 R /a46 179 0 R /a47 180 0 R /a48 221 0 R /a49 222 0 R /a50 223 0 R /a51 224 0 R /a52 225 0 R /a53 226 0 R /a54 227 0 R /a56 228 0 R /a58 181 0 R /a59 182 0 R /a61 183 0 R /a65 184 0 R /a66 185 0 R /a68 186 0 R /a69 187 0 R /a71 188 0 R /a72 189 0 R /a73 190 0 R /a76 191 0 R /a79 192 0 R /a82 193 0 R /a83 
194 0 R /a84 195 0 R /a85 196 0 R /a91 172 0 R /a93 173 0 R /a95 176 0 R /a97 197 0 R /a98 198 0 R /a99 199 0 R /a100 200 0 R /a101 201 0 R /a102 202 0 R /a103 203 0 R /a104 204 0 R /a105 205 0 R /a107 206 0 R /a108 207 0 R /a109 208 0 R /a110 209 0 R /a111 210 0 R /a112 211 0 R /a114 212 0 R /a115 213 0 R /a116 214 0 R /a117 215 0 R /a118 216 0 R /a119 217 0 R /a120 218 0 R /a121 219 0 R /a122 220 0 R /a123 174 0 R /a125 175 0 R >> endobj 232 0 obj << /Length 103 /Filter /FlateDecode >> stream xÚ37Ñ32W0P°PÐ52S03R† )†\…\¦ aS¨Tr.—“'—~¸‚©9—¾‡‚)—¾§¯BIQi*—¾S€³‚!—¾‹B´¡‚A,—§‹Bý0`€PÿÐi˜<—«'W ¦5° endstream endobj 233 0 obj << /Length 102 /Filter /FlateDecode >> stream xÚ32Ó35V0P0b#CCc…C®B.C˜ˆ ’HÎåròäÒò¹ô=À¤§¯BIQi*—¾S€³‚!—¾‹B´¡‚A,—§‹ƒýƒúõþÿ€AÏþ—«'W !‘$‡ endstream endobj 234 0 obj << /Length 256 /Filter /FlateDecode >> stream xÚ}бNÃ0€á‹ó[ñòŽ«í#•Ú[wж¾£¯Ïï7´«ûkÊÑ®é)§ìë5€Ú‚,ÝÇH‡Y˜1Fu˜EÃ1˜Û$Ì`„Ú³$ª] ½ciÕÝiÇ’˜¶MÓ6Òj T§Ä%˜0Òú©`t‰è)ßšô »µýÚ£Éî§ûì0„R7¡ ŒÇ’A¢«Ó\—þt‚‡dèC@ëf;„wÛ€75>à/G°ž% endstream endobj 235 0 obj << /Length 208 /Filter /FlateDecode >> stream xÚÑ= Â0àJ‡Â[rß LK©¥S¡V0ƒ “ƒ8©£ƒ¢s{4Ò#tìP“ö¥qj |ä‡÷Ã[Æ‹$Dõ^†Åx àQ¢Î¾>ê‡ó 2ü€Q|£n‹->¯+ðl·ÂxŽÇýˆ¥^oÇémIiTEí¸²êud=X4ƒi;87v¶LNó7މoò™üTÏŒêd²T}Xö÷_õ§—QOË^Wþo5Q;ŽG2Ê7öOõ×Ò<êq.ÖœÔWX ØÃuRÖä endstream endobj 236 0 obj << /Length 263 /Filter /FlateDecode >> stream xÚ½‘=NÄ@ …¥ÉÍ!¾L"±ËnC¤e‘H¢J ´$GóQr„-·­ñŒ7qF}#[ãŸ÷–«Óõ9Õ´ “†–g´XÑsƒo¨¬Sxm™§WÜtî5áZúúxÿ|Á°¹½¤Öª±Û´ (E¸TV";§‘èYäepšÒ{ðJý¥9†~P(eÔRÂé™XföìdH-Ø ÌXq*óKÏíÄ8§ãþ/÷ü§~ÖbyœoƃÑöq?´}Ý`ôƒéáÁô©ÀôºÓïëØ0fW Ø';´¬jœô÷#˜©†úcŠÍªþyÄ< ^ux‡ß³ = endstream endobj 237 0 obj << /Length 196 /Filter /FlateDecode >> stream xÚ37Ö32V0Pa3 Ss…C®B.3 ßÄI$çr9yré‡+˜™pé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þƒ@˜þ¥ÿÃè õ?ØÿÓp,ÿBóÿ‡ÐÌ@@4#P2Íðÿ„®ÿ€JÛÿ@£ÿ@hytúú?iBöÿAu?œ†ú«þª¿aá¥aá ?öÿ¨á[ÿþ°ø@‰Ÿ?P\®ž\\2oÉ™ endstream endobj 238 0 obj << /Length 184 /Filter /FlateDecode >> stream xÚ}б Â0à+Â-}½'0­Út µ‚ÄI‡‚¯ì˜¡Û¤…¦VÇÇår~>ÅS hR(Šéâ#^ô¦-Ç &ÙŽ"ŽlUÜ"“kºßgdÉfA!²”ö!”)isÞÀKT •¡oéY<py~# ³ˆ?@Iæz­S=©Z¿ˆ¿‹Ah1s–Ì!oâ9)ù–¹ÁÓʦ«:#Ç¥Ä-~·Ê endstream endobj 239 0 
obj << /Length 261 /Filter /FlateDecode >> stream xÚeѽJÄ@ÀñYR¦É#džÀMü¸\·pž` A+ ±RK EA±ˆ¾™¾I|ƒ³Sˆgwv/'W,üfþÅn³¿ÓìQEþ4»tÐÐuw8›Ë\ùÑ/®nqÑ¢=§Ùí±Ü¢mOèáþñíâôj´Kº¨©ºÄvIÌ@ƼÚÀ˜À èøU´Á;€é=zÅ‹¬ž'|+ž|1 #G”R (¤ø¹¤2))€RT¸58BÒ )*¤¨¢BŠ ˜0Dtc„㈒ß(rþTd¾†À¿á±<\B¹…"!OÈL¬ÑmÁ%”‚Á£è!ü)ä Y‚Ùµx†n«Äº endstream endobj 240 0 obj << /Length 249 /Filter /FlateDecode >> stream xÚµ‘1NÃ@EQ Mã#ì\Ì*Š •¥$\D‚*J(SAíÍGñ\º°2üñÈ "JË»Ïþ£ïÿÍã]>‘{™Êm”,—éƒ|DÞr!B~ôÊzó’Ó¥d‘ÓÈœ– ùþúùätöú$Pçòϊ˹‘vdW¢º3Vª-p¥uèÁµ›/ˆ «Æ—=›:Ô`Nzº¸wÏèʼn¬8røöØ,œÍVÃpÚž£¯Ý¥xèçóœðdnÿ¿&8둉ç°;æb9©•ßÞ³µ0ÔrEÓªõUXîЂyjóÖA‡^ªýŸó:œŸŸ'?—üÆ¿°ÛÈI endstream endobj 241 0 obj << /Length 165 /Filter /FlateDecode >> stream xÚ33Õ3²P0P0b3Ss…C®B.S3 ÌI$çr9yré‡+˜šqé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þƒ˜ú¡þA¨ÿ õ?øÿQŒÿ€( Ä Êþ2%ÿ…úO&…b ª Pk!Ž€: ì@ˆ'@Ôõ¬q%vŠËÕ“+ 0¾ª( endstream endobj 242 0 obj << /Length 233 /Filter /FlateDecode >> stream xڥѽ Â0ð‡Â->Bï4bÛ­àØAÐÉAAëækù(>BG‡Ð3͇‚uP=¤òAYý‡Ú¯K]¹k̵ÚpÍ&ŽËœÛÈ…MšÊgd ŸÎoç°Úk|x–¯pÿ +‡Â@Zä/0ƒ´d73(Mº\5|¢³3¿WU =e0ƒ>¬ß endstream endobj 243 0 obj << /Length 263 /Filter /FlateDecode >> stream xÚeϱNÃ@ à?êÉyƒÆ/iJ"•¥‘J‘È€D'ÄŒ X{÷hy”^åc¡¯êŠ™D5‡=îþÙü:þé§“ÎÇ|ñ_.þ(Ø_’ IŸ˜4B±±ÌCjÑz8½–nZ:Ð7¡6 endstream endobj 244 0 obj << /Length 152 /Filter /FlateDecode >> stream xÚ33Ó31V0Pa3cS3…C®B.SK ßÄI$çr9yré‡+˜Zré{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]ìÿƒANúÃÿÌÿêi†úõ Zþ@ˆæ‡Ó5`šNW€ifœôýà˜fÄI3€i0™4?(pÓ\®ž\\wG³æ endstream endobj 245 0 obj << /Length 297 /Filter /FlateDecode >> stream xÚ}‘1NÄ0E'rišÁ>ImVT‘–E"Tˆj¡¤A»öM¸JŽ’#„.HQ†ñxD³‘,?ÛŠ_þÏfu¶r•;çÑ\¸õÆ=ÕøŠÍš×U\ƃý n;,ï]³Æòšw±ìnÜûÛÇ3–ÛÛKWc¹sµ«±Û9O4pÀ/Å-1 6B/À†‚zhy†ü†8$aŽÑ” Ô`6£€_òA ]Š–Äá3D2³ã–†ÒÅÑ‘þ@$‡:Ž ÷HÝQðÔëë4ü‡ì›Ò7›/u˜Qùà½Êl¯l‚–sˆã ¥ñ³p¹z–ÒØ0Iiñb ”(¶$ŽXŽ8býâhCú1àC D\WEáàU‡wøßHµG endstream endobj 246 0 obj << /Length 199 /Filter /FlateDecode >> stream xÚuν Â0ð+„[ò¹'0­~€ÄIí›™Gé#tì =猪‹!ùAþ¹—úù€RÊÉG4Ó!Ã3vYªW}ØŸpR ßP>@¿}±¤ëåvD?YM)C?£mFé‹AhÀ0W–¹pµ•(Ô†Å&áRŽ_ïÕGW«¶RM©Êú1|šŠw5áFò—ú«ýö ]Ÿ÷æ·ñ¯¬5IW¦†º'C»§{p´Ü:ކ«ƒV†#Î \ã 8.y endstream endobj 247 0 obj << /Length 184 /Filter /FlateDecode >> stream xÚ•Î; Â@à )ÓäBænbÄ*#¸… 
•…X©¥…¢­Ù£å(9BÊKÆY#X[Ìó‚?›M³ŒbJ]-(Ó9Á¦¹ô±kÝâtÅR£ÚSš£ZË•ÞÐãþ¼ *·KJPUtH(>¢®> stream xÚµ= Â@FR¦É2'p³$!vÁ-­,ÄJ--­o–£è ´‹dœ±ò¯æÁ·3ì<6{AŒ†\±Æ¸+ [ˆÎDi,7P3ŒP#¾eƸßÖ ²É5¨çƒ˜->E) ït´ÿD›ŽL®Ì”Z&U¼×!˧Òm,—J¯¿–yÿ"LŸXœÞI?ðåµ]ìÀ&^-Vìæ±gÇž·Zêø¿n$ù̴ɦ†¦p h¥Á endstream endobj 249 0 obj << /Length 191 /Filter /FlateDecode >> stream xÚ]ν Â0àS:wÉ#ä>m©Ð± ì èä Nêè (¸¥à‹õQò3ã­ þ\È'›3ʇEÁ)çrFçï2:RÞߥ}ì¶×”¬$S2{ZÏù|ºì)/&œQRñ:ãtCuňCèà:DávG|‡iÊFy”­öÐV;¡tPo¼0ðáƒÌ7ÀæÙ÷âª{äKxÕNÄ. P¡5­ô €’’ÒÒ‚¦5-éQle€ endstream endobj 250 0 obj << /Length 122 /Filter /FlateDecode >> stream xÚ32Ó35V0Pa#SSK…C®B.#C ßÄI$çr9yré‡+ré{E¹ô=}JŠJS¹ôœ€¢. Ñ@-±\ž. ŒØÿ0ðÿ!ùÿ("”ªÁþ3Ô#!öÿ ÌÔFÿÿÿ€#.WO®@.Nq endstream endobj 251 0 obj << /Length 105 /Filter /FlateDecode >> stream xÚ32Ó35V0Pa#3S …C®B.## ßÄI$çr9yré‡+qé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þ3üGBìÿ˜úÿÿq¹zrrÊWù endstream endobj 252 0 obj << /Length 188 /Filter /FlateDecode >> stream xÚÝÍ= Â` àˆC!‹GhNà×"Ú ‚ ì èä Nêè (¸µÒÁkyo =Â7:”¾¦ÅÉÁ8„<ù! úín(žt4BMl}>pÐÓº.«ÁfÏ£˜ÍR‚›©vÙÄ39Ï;6£ùX|6‘¬|ñÖGB%%9µ "” 4Dªrr•{Ef‡V5 ÜR×’S^r_Ô,µÿ¬¥»IQiâNÉë[)%ö[ôyü/ Èû[<‰yÁo¨Rµ€ endstream endobj 253 0 obj << /Length 151 /Filter /FlateDecode >> stream xÚ35Ö30U0P0bS#cs…C®B. ßÄI$çr9yré‡+˜Xpé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þ1Ô`øÿùÿ Éÿÿ”gþ$mÿ7°ÿ«’Ìÿ>0Éÿþ`þ‰l@"üÿÿýÿÿ˜$—«'W Žá‰ endstream endobj 254 0 obj << /Length 176 /Filter /FlateDecode >> stream xÚ31×37U0P0bScs…C®B.C ßÄI$çr9yré‡+˜ré{E¹ô=}JŠJS¹ôœ€¢. Ñ@-±\ž. 
Œÿ000ðÿÿ$ëÿÿ’ÿþ700ÿc°ÀÀþ‡Aþÿ2 \ i$Á €Êêäò?ˆl •Ä4b>Ä.dÛ!îp!îdræ~ùÿ€$Ø_\®ž\\-in« endstream endobj 255 0 obj << /Length 193 /Filter /FlateDecode >> stream xڭп‚0ðš$·ðÞ h[I;˜èä`œÔÑA£3>Â#02Î+šhÔM‡þ†ûúçK£`¨#Ô8Âc¤1ˆqgàaÌSQðˆ¶H-¨†1¨ÏAÙ9žO—=¨t1A*õA½›¡ ]‘O›Pö±’JA…äy)Iˆ¼r&õÓ~ó®ßþàÇmý—·’ªkÂ]Ÿ{77”Ôx­Ü¿f}N$¹nýCâù&L-,á‹ endstream endobj 256 0 obj << /Length 144 /Filter /FlateDecode >> stream xÚ3¶Ô36V0P0bcsJ1ä*ä26òÁ" ‰ä\.'O.ýpc.} (—¾§¯BIQi*—¾S€³‚!—¾‹B´¡‚A,—§‹Ã?æ ÿÿñÿöÿDM}Ãÿ?þ`ÿ÷áÿæÿ@Ä8ÑPß$쀈` 4'þÿÿ‡Ap¹zrr8WÖ endstream endobj 257 0 obj << /Length 187 /Filter /FlateDecode >> stream xÚ%Œ= ÂP„7¤¶ñÙ˜„‡Æ.à˜BÐÊB¬ÔÒBQ°“£y”á•[„ŒûHñÁÎÌθb2+$˜Š+ä’ó]n: 2ç/*NârN7ærZmåùx]9]ì–bîJŽV9qµ*ý> stream xÚ36×34Q0P0bc#Sc…C®B.#K ßÄI$çr9yré‡+Yré{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]ø0°<¶‡âz þÁŒ@ÌÄòÿÿ?ø„™bTÂðÆÿ ÿ7~`øøƒýÿ@Ç400ÿcàrõä äÎpR endstream endobj 259 0 obj << /Length 149 /Filter /FlateDecode >> stream xÚ35Ö30U0P0bS#cs…C®B. ßÄI$çr9yré‡+˜Xpé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þ30ØøÿŸÁþ?’ý?ãÿÌ@5J2"‘Ì0’ñ?;ˆlàÿÿ¨Ìèâúÿ€¤üÿÿA*þÿçrõä äðŒ endstream endobj 260 0 obj << /Length 188 /Filter /FlateDecode >> stream xڵб Â0€á+Â-}„Þ hšP:j3:9ˆSutPt®à‹ù(}„ŽJc¼ quù†ËûO¥óTSLŠf’”"­è(ñ‚Iæ†1ií_ª3ÅŽ’ ÅÊQ˜5Ý®÷Šb³ ‰¢¤½¤ø€¦$,D¶¨m`ŸX˜ôP?¦䯰…¨a"GËä „ÝHíè¿°Žáüú’ñ[¹%=ãΡ‹i¸ˆÛ¸’{}9ßàs \Üâ#G— endstream endobj 261 0 obj << /Length 122 /Filter /FlateDecode >> stream xÚ31×37U0P0bCS…C®B.cc ßÄI$çr9yré‡+sé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]ä€ÀDübvQ$þÿG%úAüȨÿÿÿÁåêÉÈB•\ endstream endobj 262 0 obj << /Length 231 /Filter /FlateDecode >> stream xÚmÏÏJÄ0ð¯,Ì%ÐZ%c‹ã7¢â!¿02I†ñ|ÜøÖÛz¿ü¾“éGÆ­…Vx|–í,ÍïGi®˜•f¾ö‡×ã“4Û› ßI³ó÷odÞy¸A# ÕŒJõ—&E½8]&”ÃRj ©Ð¤ šÙõKXÿ™"9ãØß°öC¯ú"‚ãƒùÊÞáN¤¶¶šàžç‚ +–o¨q‘Ô ™€ï@æF2ŠÌÏh.ÊpFmLF IÿA.g¹•OÕ¬—´ endstream endobj 263 0 obj << /Length 237 /Filter /FlateDecode >> stream xÚ}±JÄ@†ÿbaš> stream xڕϱ Â@ à– Yú6O`[¼Ò¥T¨¼AÐÉAœÔÑAQèP°ÖGé#tt«—ªtò $áB¢ÓyšpÄ :áDó%¦;騿‘¤Ò8ߨ0XÇnl•B³åçãu¥°Ø­ØVK>Ú/'2%;ŽãµÇÀ%|ÃAtG*èA0‡¬`/ºPu°½Fô19€9¬a{ÑíDíªb#úØj3XÃä5S¯øS… imhO_o`{ endstream endobj 265 0 obj << /Length 229 /Filter /FlateDecode >> stream xڅϱNÃ@ `G"yh_éüp’([+•"5:T #Ö^í%pcó»He``ùÛ÷û\·wm# 
iä¶”º’¦–ç’߸jQD¹ùéœ^yݱßKղߢ̾{”÷Ïöë§{)ÙoäPÊâÈÝFnˆ(ºžŠèF Ñ©j…Àd|ÉŒL@Àä6ììmБÜT /åˆõ¤sg`À|¸®Œ¿8c†Â¨Ò’5 MñÃÙâ—”i\Qn+ ¥yrŠevœEs¬á‡Žwü Ô4„s endstream endobj 266 0 obj << /Length 235 /Filter /FlateDecode >> stream xÚuÏ=NÄ0à¥Mã#x.N´ŽV[YZ‰HPQ * ¤Aíp³%G0¢ÀE”a²» ÍgûYš¿<]6\±ç“š½çÆóCMÏ´XiXqÓì~îŸhÝ’»áÅŠÜ…ÆäÚK~}y{$·¾:ãšÜ†ok®î¨Ý0`2™€R¤Ó—é†r@ìŠI…ÀærBÈG£b¶dÅþ2lRÌ“V;äxFïò!#äSòÕI§gìµk4I±Yòžñ€;ý!þGøaÜbóžÝ¸óài^aÐeb_È»î+:‚¶‡ÑÚ(4¢ó–®é–•™ endstream endobj 267 0 obj << /Length 200 /Filter /FlateDecode >> stream xÚϱ ‚`ðáÁ{2As‰3È!¨©!šª±¡(hˆôÑzÁñĺïŒt©¡~Ãÿ8îÎûa@ ¨ç‘R0¤‡Gô=9›Îö€qŠîŠ|ÝÇè¦s:Ÿ.{tãÅ„8MhÍ3L®±â“+ÿ"dL-V¢K±x{°pprm î%@%*­!š¥ÞiÉfúÈ£ú1ƒÖºÕh¬´fG«£Ý¨ZŸFéȶ> stream xÚEÐ;N1 `G)Fr“#Œ/³£Ñj«HË"1Tˆ ()@PgŽ–£ä)S„{Aló)Çù“iw¹›iC]Œ4M4Oô2â;n÷²¸¡yþÝy~ÃÂÃm÷8ÜÈ2Ë-}~|½âp¸»¢‡#=Ž´yÂåH`xpœv ú$¸ä"¸,t¹?“”¬¥JIÏRÜsTR/´°vÌ „ –å6£#`f€ÀÁ3G&û-Û]\\ò\´Eõ«åV>R®ô­tŠUÌ?p¦²"ÅFÏ ¶ø¿Ìò¢!ÚS‚S¯`% ^/x?}Ï“… endstream endobj 269 0 obj << /Length 237 /Filter /FlateDecode >> stream xÚmÐ1NÃ@Ðo¹°4°s°­ØŠR­‚„ $¨(U ¤A½¾ WñMØ#¸ÜšapJ‘æ³Úù·]_®;®¹å‹†Û–»–Ÿz£ÕƆ5wÝádÿJÛžª^m¨º±1Uý-¼¾Pµ½»â†ª?6\?Q¿cä Ài‚&dš r¢˜†2!Œ.ÁG?pS8’ôÈ|9‡]ó'ø?‚XP‹T)æL%—ü[2Õ/±jNl¥›þ§”>9Û’¼5þ‰FX ü”éà¢=Ø … Œ–W¨UÊUG@—˜ºîéž~Uí–Ž endstream endobj 15 0 obj << /Type /Font /Subtype /Type3 /Name /F33 /FontMatrix [0.01204 0 0 0.01204 0 0] /FontBBox [ 2 -26 80 59 ] /Resources << /ProcSet [ /PDF /ImageB ] >> /FirstChar 46 /LastChar 117 /Widths 270 0 R /Encoding 271 0 R /CharProcs 272 0 R >> endobj 270 0 obj [26.53 0 47.75 47.75 47.75 47.75 47.75 47.75 47.75 47.75 47.75 47.75 0 0 0 0 0 0 0 72.2 67.93 68.97 73.23 62.74 0 0 0 0 0 0 0 0 74.73 71.73 65.28 0 71.62 53.05 66.43 0 0 0 72.2 0 0 0 0 0 0 74.27 0 46.42 0 42.44 53.05 43.77 0 0 0 26.53 0 0 26.53 79.58 53.05 47.75 53.05 0 39.33 37.67 37.14 53.05 ] endobj 271 0 obj << /Type /Encoding /Differences [46/a46 47/.notdef 48/a48/a49/a50/a51/a52/a53/a54/a55/a56/a57 58/.notdef 65/a65/a66/a67/a68/a69 70/.notdef 78/a78/a79/a80 81/.notdef 82/a82/a83/a84 85/.notdef 88/a88 89/.notdef 95/a95 96/.notdef 97/a97 98/.notdef 99/a99/a100/a101 102/.notdef 
105/a105 106/.notdef 108/a108/a109/a110/a111/a112 113/.notdef 114/a114/a115/a116/a117] >> endobj 272 0 obj << /a46 233 0 R /a48 260 0 R /a49 261 0 R /a50 262 0 R /a51 263 0 R /a52 264 0 R /a53 265 0 R /a54 266 0 R /a55 267 0 R /a56 268 0 R /a57 269 0 R /a65 234 0 R /a66 235 0 R /a67 236 0 R /a68 237 0 R /a69 238 0 R /a78 239 0 R /a79 240 0 R /a80 241 0 R /a82 242 0 R /a83 243 0 R /a84 244 0 R /a88 245 0 R /a95 232 0 R /a97 246 0 R /a99 247 0 R /a100 248 0 R /a101 249 0 R /a105 250 0 R /a108 251 0 R /a109 252 0 R /a110 253 0 R /a111 254 0 R /a112 255 0 R /a114 256 0 R /a115 257 0 R /a116 258 0 R /a117 259 0 R >> endobj 273 0 obj << /Length 106 /Filter /FlateDecode >> stream xÚ³0×34V0P°TÐ56P0·PÐ52QH1ä*ä2³ (˜A¥’s¹œ<¹ôÃÌ,¸ô≠ô=}JŠJS¹ôœ ¹ô]¢  b¹<]êÿCÀ(ýÿ\1—«'W ¾ÜF¦ endstream endobj 274 0 obj << /Length 167 /Filter /FlateDecode >> stream xÚ=Ì1 Â@Ћ…˜7›h¢Á-„XYˆ•ZZ(Ú Ù£Ùy!)‚kb@xÕÌ0)Ÿ=žr0a?äpÆGERQzŒ»æp¦D“ܲŠH®š˜¤^óíz?‘L²+’)ï{{Ò)‹'Üq…¼†ýÀÚ?ÓÉ­‰­qßF¼ „ÓAˆÀüçÔ@…^‰~QÀižM;´Ô´¡/óg5ü endstream endobj 275 0 obj << /Length 201 /Filter /FlateDecode >> stream xÚ1Â0 ES1Pyáø´Ä¤`b@LÀÈ‚9=êQzƈ[#FD¬¼áÛþv2öâ„cq?a;æÑ€ ÉÖbÌÖjf¢4£hÃ6¡h!2EÙ’¯—Û‘¢t5eQg¼•že36Æ™úø†È±® …¡ôøæÖÂÈQú–ðáÚÂÊuû—›Àx8ÓÅOÓPŠ FTÂÅ—ª4Y­Ô.uP7uþLщÍtÝD·Ò ÿ‘þÍ3ZÓÉÍp endstream endobj 276 0 obj << /Length 149 /Filter /FlateDecode >> stream xÚ36Ô34R0P°PÐ5´T02U04UH1ä*ä24Š(›@¥’s¹œ<¹ôà ͸ô=€â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. ìä?Øÿ¨ÿóÿÏÿ@õÿíÿËÿg?ÏÀ„ò r@h„6 5 ?~0~`þÀü¨³$ÅÃÀåêÉÈû²1­ endstream endobj 277 0 obj << /Length 105 /Filter /FlateDecode >> stream xÚ36Ô34R0P°b#CS…C®B. m„@ $‘œËåäÉ¥äsé{€IO_…’¢ÒT.}§gC.}…hCƒX.OöòìÔÿùÿÖÿ±ÿ!ÿý—«'W áš( endstream endobj 278 0 obj << /Length 173 /Filter /FlateDecode >> stream xÚ35Ó30T0P0SÐ52U05P07UH1ä*ä21Š(Àä’s¹œ<¹ôÃLŒ¹ô=@\úž¾ %E¥©\úNÎ †\ú. 
Ñ@Ãb¹<]@àˆ`|"™‘I°8;É߀D‚µÊ#‘vH¤ ˜¬A" HÆH$†µÈ²7 [‹l¡†µÈVÐÊZ¢ýÉðÝZº/~ki¼kÀ$—«'W R6N` endstream endobj 279 0 obj << /Length 96 /Filter /FlateDecode >> stream xÚ36×36Q0P0T0´P06T02WH1ä*ä2² (XB$’s¹œ<¹ôÃŒ,¹ô=€„§¯BIQi*—¾S€³‚!—¾‹B´¡‚A,—§‹ÂÿÿÿÂ\®ž\\Ï5^ endstream endobj 280 0 obj << /Length 187 /Filter /FlateDecode >> stream xÚ33Ò32Q0P0bSKs…C®B.S3 ßÄI$çr9yré‡+˜šqé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]øÿ Æÿÿ€9ÿ?©úÿÿ€Ä~0ÿa``Êü«cRòÿØ:ìÿ€5ÚÿSõ`”üÿ†ÿÞÿØ)ö`Šñ˜R( Cþƒ^ ¤yÄPÀø:ô5>ŠËÕ“+ Šc endstream endobj 281 0 obj << /Length 232 /Filter /FlateDecode >> stream xÚí’½jÃ0…¯ñ ¸‹!÷ *;¡8ž ùx¤S‡Ð©Í˜!¡è‹ùQü=ß ’:J§„¾st.*&O“gIeŒY¤’§ò‘ñ§9tƒñ¾çYÅöU¦9Ûªl«µ|¿vlg›¹dl²Í$}ãj!D”(ѨP¤Úƪ§PT7‚Öª^!¿ƒíLGkãùp>ÒžŒ1F=%H+=° Áe‡=.`í€íO,Ñ]oU, ¢£¹áÐ;ñ#`ì®8Œ¿BÜ”ô:îÂK˜yà?"þiƒ×ÿVíïA^VüÂgV‡. endstream endobj 282 0 obj << /Length 291 /Filter /FlateDecode >> stream xÚÑ1jÃ0€a ‚·øÒ jR'YbHS¨‡B;u(™ÚŽZڭؾI®â£ä=˜¼JïIq‰ÁT`ø$/ÿ“V‹«ëµIÍÂ~«ÌäkóšÁ,s»OÝÖýxy‡m É“YæÜÙSHÊ{óõùýÉöáÆdìÌsfÒ=”;#ìÒðkTÑNUç„ÝDö3’8L¤ð4£1è¤裵>+*bôùT)ôÑ?£dÐ C~yE}ˆŽºQÂKZq¾<Šš¥¬8ZµT°b+Ρ1ܼÏ×nÎ N”¿q÷Aªœ(ºF».äÀùgE¤žã…¸$ <†àAéÄñ‚óGÅ.!Ñ šÕP¼Ï/X-Å{Uü°­«£wÅî¿‚ÛáÆÁÊ’ endstream endobj 283 0 obj << /Length 235 /Filter /FlateDecode >> stream xÚ¥ÒÁ ‚@à‘Â\zç ZÑ< f‡ N¢SuìPÔ¹ÍGñ> stream xÚÅ’=NÄ@ …MÉÍ!¾$)Èf«‘–E"Tˆ (‘AKr®’£äS¦XÅØ“Ù,=S$_> stream xÚÅÒ½ Â0ð‡Â-}„Þ˜ìÇV¨ì èä Nêè èl­ÒGpìPz&±M„ˆÐÉ@á—„$åÓ$BgüK|Œ<p8äs9‡3d°-Æ!°%_V¬ðv½Ÿ€eë9ÀrÜèï¡È‘ä°øxë©Ô)Q©TóÅ”ïxÔô²©íe¥4ÈG¤ªzMÄa)[¼"ei=šAikÊëL¹ôM¥!çCÕhÕ×ø.TC×Ê#³¦igÖ^w†£o¶êªî´î¾J„-ã$äŠKH…­We¦N'Q<‹6ð¯?K endstream endobj 286 0 obj << /Length 208 /Filter /FlateDecode >> stream xÚÒ½ Â0à„…[úæžÀ´[' µ‚ÄI'õÑ|£ƒìµÐ´Ö@ໄ\þ.ôû]Ô=ô0âÖƒa»:Ô›=Ä)È%!Èi> 2áéxÞŒçcô@&¸òÐ]Cš ú¶ŒuãŘPŒq‹Á"p3q%ŒÚÑ«áÒ§™ÎÐN°¢€¾ðß(WUyxû¦9ø³8¡ ëÑVÁ6q¯Ã1 D„=¸¢$Ø¡¨•D‰÷/À$…|®±ßd endstream endobj 287 0 obj << /Length 173 /Filter /FlateDecode >> stream xÚ37Ð31R0P0b3S3 …C®B.3rAɹ\Nž\úá f\ú@Q.}O_…’¢ÒT.}§gC.}…hCƒX.O…ÿÐ@€>À`ÿAJ3Bi†z(m¥å¡4?”f‡Ñ 43š+ÍøF3| @3€hf4;”æ‡Òõ`è+¢h˜z„~vö1’HƒiP¤~ ‚ærõä äœÏ endstream endobj 288 0 obj << /Length 300 /Filter /FlateDecode >> stream xÚÍÒ½N„@ðÝP\2 pó ÄX‘œg"…‰Væ*µ4Q£5÷&÷*< 
°åÆ™`¹øQ{ù±,ìÜÌ¿,OÓsL1Ç“ Ë3Ì/ð)ƒ7(r^L±ž<¾Àª†ä‹’k^†¤¾Á÷ÏgHV·—˜A²Æ‡ Ó Ôk4ü#gÌ«`Id ßKD-XûHT±ú…HžQìd[Ïë;'Ûøë¥n—ü1‰ªÞ“ÕÆi/jœ®óÇ{;_…ã÷ƒZŸÓöX\‹?b.®´ ê¿«QÙ_äËó%þ5Üt×õIÿ¥ôs&µüAÚÉciÇUÝ h’NËN SµÓ¤#þvPHDH‰&‡4MÎÒnL˜Ï•OÝ!“è|&%­Ig]‚«îà ê¤ùr endstream endobj 289 0 obj << /Length 121 /Filter /FlateDecode >> stream xÚ³0×3µT0P0b 3 …C®B.s˜ˆ ’HÎåròäÒW0·äÒ÷Šré{ú*”•¦ré;8+ré»(D*Äryº(ü‚f ñXƒý? øÿƒaä±þƒ)¬‘VøX¤§:.WO®@.Ö 4n endstream endobj 290 0 obj << /Length 104 /Filter /FlateDecode >> stream xÚ31Ô37R0P0aK3 …C®B.cS ßÄI$çr9yré‡+›ré{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þÁlƒü†Q3è¸\=¹¹‹iƒ% endstream endobj 291 0 obj << /Length 149 /Filter /FlateDecode >> stream xÚ33×36T0P0b3#3 …C®B.Ss ßÄI$çr9yré‡+˜šsé{E¹ô=}JŠJS¹ôœ ¹ô]¢ÆÄryº(ü‚ „hû £4š½?Í£ðÓò8h{4ºþ¡¡43”f‡ÒòPºB3ÿÿŽ×ÿÿÿ¤¹\=¹¹¯½¢a endstream endobj 292 0 obj << /Length 278 /Filter /FlateDecode >> stream xÚÓMJÄ0Àñ”. o“ H›˜dŽÕÂ8‚]ãÊ…ÌjtéBQ讽‰WéM캜Å0ϼøW:…Ðþ(üyÄšüt–+£Îܲf¦òsõhás·aˆt²}†eú^-æ oÜ.èêV½½¾?^®¯”½RV™ T+…xþi[Dü2hé; Ê_Ð.°#ÄŸ ì ÉGˆf È,D¹#¤ ²½ð¯ H_W3H|ÝÀ ¦ ¨gQPÜMAP]Òr :)8P]Ê‚‚ŠiP]Í‚ê®.êY¸ ¸cá‚’ö4ƒ<Ê]:‚l_Œ@êcà0‚˜æÀÂÏŽ… áðáù»%Ãåœü®+¸ƒ/]zœ endstream endobj 293 0 obj << /Length 277 /Filter /FlateDecode >> stream xÚmÒ1N„PÆñ!$ÓpæÉ*l¢!Y×D ­,6Vji¡Ñd;<Úe`Iaö93o,(H~<Âÿ+ mÎÎ×TÑŠ¯vE-½ÔøŽœUr+žßpÓcùHÍË[>Ų¿£Ï¯W,7÷×Tc¹¥]MÕö[ !@‰õí:,è]øáW`¬Ñt~]'Óå¬!LêdDUHZ•KZ•i:j4¥®DGD i•¦Uš6L…KGT:¢Ò´JÓ*M›Â¤Á%#Q’Ž’t”¤'¦Ô%#Q2bâ´‰Ó&N»Ž¦ÜÅ#&N›8mâ´+L\úÉT…+we®tA‰ f ®ÎU,(we#Ä¿RWâ‚Yû›ðXMÑ× endstream endobj 294 0 obj << /Length 286 /Filter /FlateDecode >> stream xÚ½’±NÄ0 †sb¨äå!~èU ë1U:‰H01 ›€‘sîÑú(}„Žª;¶RÐ!F:$_þØŽk{sqVã ×xZa½Áõ%>WðuÅâ k»yz…m åÖ”7,CÙÞâÇûç ”Û»+du‡ì³‡v‡Î¹‚:—>¢˜ö‚H%Ï0„èhâ}ÁGOÉäàNÄhI¢öl+÷­›Ñé"‡$§>ªx$O‰‘Aâ9Ñ3Hà:ƒ7¼¦ICc0C0˜Â” üdÿæ4rªGðËZƹ3h醥AŸ¡°:wß*¯½8,´;$Á¥qQRrº¤WEö¤½g‡Ž½{ !“Љ̳A:>6@ ÃøcòhÙ°Áu ÷ðž¤ö} endstream endobj 295 0 obj << /Length 185 /Filter /FlateDecode >> stream xÚ37Ó35V0PasC3 …C®B.3s ßÄI$çr9yré‡+˜™sé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þƒ„ñÆøcüo€100ÈUòƒŒÿ@ õ  ûPˆ3øúÑ v,ŒÔf [Í=èn†ûæ/¸O¡~0”ñÆ85 †)šˆcp¹zrrÚõÏ\ endstream endobj 296 0 obj << /Length 251 /Filter /FlateDecode >> stream xÚ­Ñ1nƒ0€á‡: 
½…”wÖ 4ÈYŠD©†Hí”!ê”d̪™áh9 GÈÈ`ñj°1RaKd}22²äây™PD zŠI¾P"éãeDÝ“¬Ì›ý ³Å–d„b­—Qúù¾Qdo£ÈiSô…ENÜôèÅW§Æ©uâJ3d€”k«¾YA¿¥W©¥í ù©² fuýM¿<7'MÕäž»¥ïnžÚÝ€ASýwMRàö \S¿ošÖ'ðæŠß%u—«vªrChë2<š>úï¿\+#_ç2ò˜o¶cibBרÂ÷?ñi h endstream endobj 297 0 obj << /Length 305 /Filter /FlateDecode >> stream xÚm‘½JÄP…OØ"p›¼€yÍf‰‘aa]Á‚Vb¥–Šv É£åQò)#\î83w‰.x›Ìï9“zu¶ªhI5–t^S½¦—Ò½»j-Á%]2Ïon۸⪵+n$ìŠæ–>?¾^]±½»¢Ò;z,iùäš<àH9àØ0w{‰1‰àÛcÁ]Ω<² h=òQŠ=6 zh¾,ÝŒ$üûýd˜ˆà1bŠðÐ׆«ا¨#X«êéÉA}Éëă¼ÞiMËÖ©¥S¬Ñ-d§ÚpíAÜiÈÌ$ r¢ñÉ0cúðGÖÝ‘»Ò"Øyäž*\ެŠå'¨ªÍ5 ‰Ðš?ŸÛ)¦ÔœhVVQ¥»nܽû÷ó× endstream endobj 298 0 obj << /Length 162 /Filter /FlateDecode >> stream xÚ37׳4T0P0bs3s…C®B.3K ßÄI$çr9yré‡+˜Yré{E¹ô=}JŠJS¹ôœ ¹ô]¢ÆÄryº(Øÿ‡€D1þ1ðÿo`þÿ þˆÁ`ÿ¡þ˜!ÿ¡žÌ`G0ê æ5#F„Á€ñÊøñʨ †Áe0Œ2¨É`'â\®ž\\TÒË. endstream endobj 299 0 obj << /Length 208 /Filter /FlateDecode >> stream xÚí’= Â@…G,Óä™ è&"ù©þ€)­,ÄJ--mMŽæQ> stream xÚнJÄ@Àñ )Ûän^@“øqäš œ'˜BÐê ±RK E»ã.÷f‘{‘tצÜ"dœÙÙUCPœÀò#»,6?;>ŸA 'p”A~ Ó3ý¢óø›ÎdçáY/J¬ OurE¿uR^ÃÛëû“N7éd w¤÷º\Ò(¥Pæ?RE¯x:¥ ôšˆ «"¤XÔ²êBR$jX´¨ˆ–PT³èˆŠI¨b™&|=v,åU°¶¬¹§nX6zm…ñY‰6^çs²D‡VÍÉý­ÈŠ£9^[q>'K´M¦T#É6ºQôÜ©ÿ¡ˆò×N(ÉöÍ×Î)Æ]ëõñ¥½S„ûÆëàâ¡öB§±ú] Q´íÇ*º¿41cÅíXQ3”¾,õ­þhñÀî endstream endobj 301 0 obj << /Length 322 /Filter /FlateDecode >> stream xÚ’»NÄ0E'Ji‚ý$ᵡ²´,) ¢@T@I‚6 âÇ"ñ#ÛÑn¹…Å0?”r‹DǶâ£{'ÝéÑÙ…iÌ ?ݱ9ïÌs‹o¸êxÝø¥?xzÅuõ½YuX_ó.Öýùxÿ|Áz}{iZ¬7æ¡5Í#ö3Ñ\òk:”@Ñ Ø@å©Ü ñ»òTŸT¼95'ÒЀ‰ÉX¦!ÒNHÓ\bÈ„D&:QˆNb‰È„‚Ìë‚ÂëÆH–†L.’¦}$µ ]¤*S™O‹¥[$]Òå½Mé šÓ}‰*¢m¤ß¤S?I¢¿Ù¯¤³Y7ÀÉ%1—gƒŽËÓAÂåEâòTÐqy1—WâòbN›ÓÙœnÈäRN™”èdRÚ“LJt2)Ñɤ¼Ž|Xÿ7¹C ¯z¼Ãaƒðh endstream endobj 302 0 obj << /Length 232 /Filter /FlateDecode >> stream xÚ}ϽNÃ0ð«J¡l¬ü¹³;Ta?ùìûpÛœ7k©äBÎjiÑÃkÍïÜVb»¹Ì7/;Þô¥­8Üj˜C'Ÿ_o6÷×RsØÊS-Õ3÷[¡&Òå±0’Æ`Q·Ð0‘|T*õM *pŠÓŒ_¬°·ÃÅ2ô $ŠL‡o1ÔJc4|îÐåÝœŽä~82ý;á eSz™ñéºÒ)<Æ8`¯ÍŠN9y{ƒÑ2Êhà›žøål¡— endstream endobj 303 0 obj << /Length 229 /Filter /FlateDecode >> stream xÚÅ‘; Â@†7¤¦É2ÐM4ñÑ(øSZYˆ•ZZ(Ú ñhà̶Ü"8ÎÆP+q›æ±óÿ3Íz­ ‡ ¬ú¶±ÙÁµ;MÐÃV‘Ym¡œc€sd4ÁÃþ¸ÙŸÐ9Ä…Þ¢!Š8üˆ¾Â~Âúƒè̸¥Œ+‘fÜ’^Æ áÜke˜ÄÙ"eš,®”æŸˆÕ tŽÞGd?ÀË„bú›$UÊ5â“ÒŠflì$*lóÞÍMgnó ´C¦JÙæhVÊ·3Ë®FÌàiÔp endstream endobj 304 0 obj << /Length 214 
/Filter /FlateDecode >> stream xÚ­1 Â@E'l˜&GÈ\@7‘E±1#˜BÐÊB¬ÔÒBQ°’£í‘R¦gEì…áv>ÿ¯™'SŠÈÐ &3!3¦cŒ4#£Nq›ÃÓõ–ÌõRdÔùŠn×û uºžSŒ:£]LÑóŒ’> stream xÚÅÐ1 Â0à”…·äyдÒ*N­`A'qRGEçx¯ä ¼‚7бCéó=q(8‰òÁ ÿŸv«ÙŠ1Ä&]lwqÁ†Øy,ÖÐËÁN1‰Áy 6án»_íûÍpa8‡•‚&:2)Ñ™¡BztòŸÊU™«ÇUN­ËÇ+æIZÔà^Ü>¡àj©‹$qÍ©ÂÆIMîMRÚ'*ùmseÿ c¨ÒL@… ÜI 9Làwn¶i endstream endobj 306 0 obj << /Length 226 /Filter /FlateDecode >> stream xÚu=nÂ@…gåb¥i|Ï’eÅÒYâGŠ‹H¡¢@T’Djûh>а¥ äÉÛX ÉŸVï½yšyñÏÞËD¦òä%¼J˜ÉÁó™C€8‘0Ï/*v[ ÝdvÕ»\/_Gv‹¥xv+Ù¡hÏÕJˆÊžˆ2Õ†(Wí ¨F¢ºO†¶öFF›l@²Ä&¿%`Ý}b —ÝÈzdüeL,¢>2½¿Ýÿ°~dgygL[41Ƕ¦³Š» ÚÖhKy“êJ BaûsµQø óºâ îDŠ endstream endobj 307 0 obj << /Length 167 /Filter /FlateDecode >> stream xÚ36Ñ32V0Pacs…C®B.cK ßÄI$çr9yré‡+[ré{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þ700ðÿÀÀPÿÿãÿÿ?˜ÿ÷ÿaàÿÇÿAþ<ø$ìADýÁÿ‡áÿ0ÁüH0 ¤ÿA6b#È4oˆúÿ@ÁåêÉÈèü®  endstream endobj 308 0 obj << /Length 281 /Filter /FlateDecode >> stream xÚ•‘=NÄ0…ÚÂ’!sH›´––E"Tˆ ()@Ðß`¯ä£ä)·ˆ<ÌØ‹Å$Å'ÏÏ{ÏIן5-5tA§ç-ukZwôÜÚ7Û5¤oßZO¯v3ØúžºÆÖ×R·õpCïŸ/¶ÞÜ^Rkë-=ˆÔ£¶ð„/ÀqZq€gÞ XŸxÂqdWŒjï£Ip‹nIU¨ì¤iÿÀ+ÂÿñW%KK"5²-CiÖKìŒ #;–A˜ 58©E,˜ æ½k΢SvàYlK³ S^`‰%*#ÃGÝÅ4dP€ãã”ɲ€1ê:¼^.ei³À¥üiþ‘C–¨žÌ%ý>+éÁ^ öÎ~ÝèÈñ endstream endobj 309 0 obj << /Length 167 /Filter /FlateDecode >> stream xÚ33Ò32Q0Pa3 ²TH1ä*ä25òÁ\Dr.—“'—~¸‚©)—¾P”KßÓW¡¤¨4•Kß)ÀYÁKßE!ÚPÁ –ËÓE¡þüÿOb†PŒF±ÿSöÿ@Ôÿÿ€ÔÁÿÿ©ãìÿ©ó ò ê>ÿ? uBýP?Øÿ©(ÔlÔ¡Dýÿÿ¿ùÿÿø(.WO®@.Jå×m endstream endobj 310 0 obj << /Length 131 /Filter /FlateDecode >> stream xÚ36Ô34R0P0b#Ks…C®B.#ßÄ1’s¹œ<¹ôÃŒL¸ô=€¢\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. 5 Œÿ˜ÿ7°ÿ?Düÿ #ˆ P¨¨’¨?Pÿ1ÿ?ÀH{ôp¹zrrÙðD endstream endobj 311 0 obj << /Length 220 /Filter /FlateDecode >> stream xÚÅϱnÂ0à  H·ärO€“¢´bB*‘©L ˆ‰22´*+ö£¥êÀc¾c"û¿… F,YŸÏ²ÿ³‹A/áŒû~oü:àÏœ¾¨uʰXoiT’YpÑ'3õ»dÊÿ|ï6dFcÎÉLx™s¶¢r‘­"?D+§c¥~DRãdZ¡ÞÛ+-ˆЭARÔ«.à·Z”£§T7œ™ÿrBŠ ‘³Ê°U. (]Ÿ«],ᮣD> 4À¶À§ù®±Hsz/iNW^`ص endstream endobj 312 0 obj << /Length 107 /Filter /FlateDecode >> stream xÚ36Ô34R0P0bc3K…C®B.#S ÌI$çr9yré‡+™ré{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]ê0üÿ‰™˜qàÿÿÿ7 c.WO®@.„S—œ endstream endobj 313 0 obj << /Length 209 /Filter /FlateDecode >> stream xÚíÑ? 
ÂP ðˆC!Ë;Bs_ëZA,T;:9ˆ“::( n>'Go qèQz„ŽJcªƒ¸îß—dûÚZ£E5eÚuj¶héâ}O²SÆò°Xc¡ž’ï¡Êu4¢Ýv¿BŽ{ä¢îÓÌ%gŽQŸàh¬@åÌ&àŽlJ2§æDxbΪ…çÔÎUdÂK¬ ÛØ9TùŠ»`Pá+XÜUò.<¼˜ÉS*ñ“©0y1Æß ÍŸoò³–^Š_ˆƒ'øøïü# endstream endobj 314 0 obj << /Length 162 /Filter /FlateDecode >> stream xÚ33Ò32Q0Pa3 eªbÈUÈej 䃹 ‰ä\.'O.ýpSS.} (—¾§¯BIQi*—¾S€³‚!—¾‹B´¡‚A,—§‹C}û?†ÿÿìÿ7€¨ÿÿ©Æÿÿ©öö€Tƒüæÿóøÿ10þŸ¡ö@¨ ìÿÔê6êÀP¢þÿÿßüÿÿ?|—«'W ã[« endstream endobj 315 0 obj << /Length 213 /Filter /FlateDecode >> stream xÚ¥1 ÂP †#B–¡¹€¾[¥S¡Vð ‚N⤎ŠÎõh=JбC1&¶ÕE\|>øóó’?ádäùäј†>…c &tðñŒA$¢GÁ´éìO˜X4 "4 ‘ÑØ%]/·#šd5#MJ[ùh‡6%·y=æ\0`..³ªYå°€óßAK<ý@\À@Q‚#6·§-WQwˆu©;Sðwð ÷?ñkB·KƒnÏú•¾ÍÐ&jÑ×´…„–ìùû1³´Áa®>7k.ˆs‹k|]Åf endstream endobj 316 0 obj << /Length 227 /Filter /FlateDecode >> stream xڵѱjAàY,„i|çtïôN´Œ‚Wbe!V&eŠˆÖç£-ø>B|„-¯Xÿ•D„ÄT±X>ØÙeçŸíuÚLéJ+HÞ—,—×”?8»‰ô²¯ÒêGÛ¹äÛ)öÙϲYoߨŽ^ž$e;–E*É’‹±P鑪SݽêT+ðé†(5OTÓ@u%ƒBMwF=p§±ŒºoHý-euŸaø~ÏÿììÒnlÞ]£Tȇ`1æ)†6AâÆ¯bXiú DAãŸü O žñ¥ÜÆ endstream endobj 317 0 obj << /Length 237 /Filter /FlateDecode >> stream xڵѽNÃ0ð‹2Dº¥o@îÀ1²‘²©-`b¨˜€‘¡¬8oÀ+õ ú yÊV‰ÊÇ?0¡N0X?éîlßÙ¾<±§Rˆ“c[Š/Åyy°¼dï-äÌ©û'žÖlnÅ;6—ˆ³©¯äyõòÈfz=Ëf. +Å×s!ªZ:"JuOçDUzELµº›´‘mÓˆŠu2mè3¢(€ˆâH9Àªö? QízÂoèöï îûni`l7šGÉ€vc6‰C¿#¯Û|‚ê[·Ic7qЇÖ=ý™ÿD¦ø˜ðEÍ7ü\ͱ! endstream endobj 318 0 obj << /Length 161 /Filter /FlateDecode >> stream xÚ31Õ37U0P0bcS…C®B.cK ßÄI$çr9yré‡+[ré{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]êêþÿoüÿàÿÿæÿþÿïÿÿHôÿùÿ¾ü?æÿûäÿ1þß"~À‰`‚ÿãÿì?€ã ÁÀ€L 7ñÿ?Ðbl—«'W n endstream endobj 319 0 obj << /Length 223 /Filter /FlateDecode >> stream xÚE1NÄ@ E?šb%79Âø0;Úì"ª‘–E"Tˆ (·AKÜq­%GH™"б´4o4ßßþv]_ä+^sÍç™k{wüšé6[í{¹T^Ž´o(=òfKéÖdJÍ~|½QÚß_s¦tà§ÌëgjŒ8êU•ʇ R:EZ Ê·cªV¢ÿG@­‚V‡•ŠjçU'Øø„3r¸Ø¹Ó–½µ—£å:ªÓ ¾Fg ñ¾©u·Ð1Ìv¥Mª#†bj¿2;Ý4ô@¿* endstream endobj 320 0 obj << /Length 173 /Filter /FlateDecode >> stream xÚ31Ö35S0P0RÐ5T0¶P03VH1ä*ä26 (˜™@d’s¹œ<¹ôÃŒM¹ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. Œ°ÌXv8Á'äá„=ˆ¨ÿ3ˆàÿÿÿÃ,X  wˆ'€þüÿùC=„`?À`ÿƒ¿Aþ<Ø7@ïÿÿ ¡ÿ? 
ærõä ä ,t endstream endobj 321 0 obj << /Length 166 /Filter /FlateDecode >> stream xÚÕÊ+Â@ài*6Ó#0€í6ÝÚ&¥$¬ … (ŠD@@/G[Ç5ê°8¤Ã‚¨Á£¾ü"e9¥”ÓÐP!Zj îÑZ)%Ÿe³ÃÊ¡^’µ¨§R£v3:N[ÔÕ|LuM+Cé]MàD Ì!æßÄ a9PIÒcУd€/-x>ƒo£;wàê*”Ì!aVBÌÝð7õœ8\à ¦ä¤d endstream endobj 322 0 obj << /Length 216 /Filter /FlateDecode >> stream xÚ}Í=jÃ` `-¾A¬䳋M)˜òõPH§ !SÚ±CC ÉÑ|”Á£'ꫯ¡¸’oþ4J$ëüQ²LÞSþâ<ÜØh‡õ'+v É3v/ز«^e»ùþ`7žO$e7•e*ÉŠ«©¨*…ÚÝ#ÐÑ3‘Q€Æs;Ðþ*ÑØ— ø‰/‚Ô@iàh#2ê+1@îð„[|áiöÆ¡ÙyÚÖ(ÛÆsöÄç“G=‘Ö· ·G¨Ô#¸ô¡î–ʳŠßøà•pH endstream endobj 323 0 obj << /Length 234 /Filter /FlateDecode >> stream xÚ}±NÃ0†ÿ(C¤[ú¾'¨”±4R[$2 ÁÄ€˜€‘¡lU›GKß$/à Çù¼0Õ²õéì»Oþ››euÅ%ÇÓ\s]ó[E;jj­ËXƇ×Zw䟸©Éßé-ùîž?÷_ïä×®Èoù¹âò…º-‹ü¢•p ÐÀiB1íŒE¸ mQ,GE!ýA‘Ë0)29÷Nò3Dœ¤hœIƒ¤AÒ iþ¡1µ„„Éæô7ºVÎpHšÉ4Y0Ml¾3ÃEˆg¡°²P1€jDßEæK ÛŽé(kЉ endstream endobj 324 0 obj << /Length 267 /Filter /FlateDecode >> stream xÚ}ϽJÄ@àRn“7pî h~˜(Âb`]Á‚Vb¥–ŠB !y´ø&û)Sdw<óƒd„>¸ÃÌ™SŸ¥äRÊq™Ku&ZËsÁo\iLs9Õáèé•× g÷Riή1笹‘÷ÏÎÖ·—Rp¶‘‡BòGn6bŒ¡ØÌÿ™-Ñ‘eFGZ0ý‚Ucc^ÏpGí))€¡$ ·ô)ˆY†€È=ò ÜÆ¯ã—¥[Ç4Yêitìj·uGj†¿ wAlhA´_Bóí“gô6U¹ÊT÷¶2uƒ­Œ¶2H¾–òø’ƒo÷í^î_Ë„>áë>ƈ¯¾ã ø‹ endstream endobj 325 0 obj << /Length 208 /Filter /FlateDecode >> stream xÚm±ŠA †±XH³0ywGAnÁSp‹­,Äê´´PÎÚy4eáJ 1&ñ´20$Ã÷g&{C.¹¯'8FÞT´§Xi_Zk?;7T,9VTÌtJEóÍ¿‡ã–Šñü‹u:á•*kj&D+½áAZÔ7„³3á¤C@.¨Ñ‘?|þ³+­2“3FÈ%½¨JU•ªj=¨p®>i05K¦¾¨™ïΓ©9´€ÜÕàê“¶»öÝ'ß-Æ®øão°Ï½#MZÐ'´}Õ endstream endobj 326 0 obj << /Length 211 /Filter /FlateDecode >> stream xÚÅ‘±‚@ †kLºðôôÀŽ$ˆ‰ &:9'utÐè Æ£ðŒ „Ú£ º¸š\¾Üý½4×ï¢xîäSH³€¢Å]¼c¸”Ч8ÖÊù†iŽæ@áÍFb4ù–ž×Mº[Q€&££4:až@ÒÀ„Yè2×0KT4^ÀÕ´—¢]N/ÇrÚ¡”ŠÊµ¬]¹œÔže£´“vd൅e÷›lÙÿ‹¿ßö5ÑÀÏyÕ€ÚP3jɱÞJY²Q“£U5¬¶æôpãß³ÛÀ endstream endobj 327 0 obj << /Length 126 /Filter /FlateDecode >> stream xÚ35Ó30T0P°b 3S…C®B.c ßÄI$çr9yré‡+[pé{E¹ô=}JŠJS¹ôœ ¹ô]¢ÆÄryº(000````ò ¢H0ÿö@âÿ,Äáÿ0%#Œzÿÿl—«'W ØšŸ endstream endobj 328 0 obj << /Length 266 /Filter /FlateDecode >> stream xÚmбNÃ0à‹Åöï³Ïãú¢|ïGý¿ýÓÀ/¼Òq¯CýyÜófâîίFî®0ËÝtíß^ߟ¹ÛÜlýÀÝÎߣÌO;O$™ˆ9Á 1!˜rðHõâ°Ðdš…Úˆõ4›f¢&˜ç‚p–B•l9{„ôŸÈÃÕ6©8ù,Ö´Â/õvîK¤qb´ûÒ·í¢+tÍÙŠ%+ ¿N»C7¶É"­EB´8Ñè¤V‹êP Í#R¨I*š‡h~ jÁ:¹Rᕤè[I®ÍÆlÍ`Φü˜þÊ—ßò'‰Ä& endstream 
endobj 329 0 obj << /Length 258 /Filter /FlateDecode >> stream xÚ…±N…` …{Ã@Òåú $÷g%¹^Ltr0NzGÎðh< ÀÈ@¨=…ãâò íééicu]”RH”«Rb)U”·’?ø­XHU­×w>5œ?É1r~geΛ{ùúü¾p~z¸‘’ó³<›Ñ 7g!Ò‘ˆRUc¦ÚµŠ’R;Q2Q½P:X Ja2m0{´þ£ëûtÆ”yíl[ÀJ8ƒ XÏ í¥-ÖAvH¸xÎiO›zÚM¹Í÷YýSgâ¢ÄV6ë•Óo†¬GÐbìÔùÇÉÆï2ޏ´ÀºC’lÄLñUú‡[ÏŸù]~(ß6üÈ?údµ£ endstream endobj 330 0 obj << /Length 216 /Filter /FlateDecode >> stream xڭбjÂPà„ ³ärž 7ÁDpI *˜¡ÐNJ'utPÚ-4Ù|-7_ÃÍÕ­…ôæÿmzàÞs/üœ{ÓñCk¤#»Ò‘ŽS]Ų•dbû¨k»‹åFŠRÌ‹&1 {*¦|Ô÷ÝÇZLñ4ÕXÌL_mÌ›”3ulåŽó‡š´Ø]â ðI@B’¨I Ü/àßsÁ„ÌÌÈ'©È¸à€ßsABN–‘jÀ¸à€AOB¾/#ù&-ª¹Çï¿ü'5£o#óRžåŒÔ‘ endstream endobj 331 0 obj << /Length 253 /Filter /FlateDecode >> stream xÚ¥Ð1NÅ0 `?uˆä¥Gx¾¤‘^:éñè€bF¬4G Ç GÈØ¡j°]&`£ª>EIcÿµï;Gy:räõžî>áÎófG}¿žÜ=â~@{M;öœ·Ñôòüú€vyJín¸Ð-2ЀÉL]_~ÔEÕI-jV£¸€8«Yåz&Á? …}—Bæ£Öæs훃$–SéÂhjääMM|wSSYNñ-ðµŸN¿m£²8±®NZôTÜÔ2fé5J÷ü’äD 2ЏMÐrà[μ©Ñ‚΂̿˜51ÿ=ž x…_‚²¶d endstream endobj 332 0 obj << /Length 264 /Filter /FlateDecode >> stream xÚ}пJÄ@ð9®LsoàÎ è&p›6pž` A+ ±RK EëÝGÛGÉ#¤Œîs&åüƒ~Ålvfö õIYI)AŽ+ •ÔAî+~âuÐb)u½?¹{äMËþZÖý¹–Ù·òòüúÀ~sy*û­Üh£[n·B´@""‡^­H1Ñj$—¨éÉeŠÅLЯÓ; tËY½Ñ;su ÓVÈfLæ5*}:˜ñ›…ý;8ÝCD§á­×ëxÏ:H:n2Áæfìfu«Y›ÛÿrÐVÿµùißL=Ý’½züÊ! å´äŽmNû@¢½Hö´ h––ö”‡ø¬å+þy×- endstream endobj 333 0 obj << /Length 214 /Filter /FlateDecode >> stream xÚ¥Ï= Â@à )Óäf. ›@LìÀ‚Vb¥–Š‚…hŽ–£ä)SuvVŒ°qŠv–÷–íF? 
Ÿ"jÔ )ŠiàØ—¼î™›õ ª…1ª ¯Q%S:N[TƒÙT#ZrÑ “µ@g¬ÄϽi¿¶K±s13Þ´é•»úpa¯bg¶ÔZ¢]ð œ 7S­—‚DA¢ Ñ·å±…ÖݼÖ3fRóáÍ(õZ«¡ý¾t~êþ¡s—Wê/â8Á9>?æŒ endstream endobj 334 0 obj << /Length 290 /Filter /FlateDecode >> stream xÚU±NÄ0D7JÉ?!þH"]ÒZ:‰HPQ * ¤AíHüX>ÅmJ–—Ù=N:š'y¼ž™õ8]öƒëÝè.7nÝË`ÞÍn„Ø»i:Þ<¿™ýlº·MwÙtó­ûüøz5ÝþîÊ ¦;¸G=™ùàˆÂFD53h™W"Ï ),m¦*S]¨NT1Õ™š(WB¿X^lÁöÄxÆM™”E'YÞ¶HB’b3œ-—ªPÃü…?IJqD´¶bmN £¶MʬJÑÆ<K“e›àÑAñzó‘VDlaAD‰ƒ!I„W¶J{Ææ?1߈íx’^¶Ž~ÓM“ü•-ò{ ÊÝ(kÏM;¯Ú†$‚¹žÍ½ù«C¾ endstream endobj 335 0 obj << /Length 265 /Filter /FlateDecode >> stream xÚ?JÅ@Æ'¤X˜foàÎ4 ¼Mx>Á‚Vb¥–ŠvBr´%GH¹Exã7I@E !ü 3Ë|b}VVRJ”ÓJb%u”ÇŠ_x1,¥®×ÍÃ3ï[.ne¹¸Ä˜‹öJÞ^ߟ¸Ø_ŸKÅÅAîpèžÛƒu9=‚AµÇ@u$Ò±™(ÓÞ'Ê•ÜLîhŸŸí7ÌXQcìWv @Ú8®Ô/Nÿ`ú“™¦î3¶1Ì&“šÜBX=Ñc¸¢Ë­fQò:¨Åƒ.rÿ$Âc³1ŒÞÞaÉØ˜VÿÖä@¿r&¸Âã0: ƒôS®ìYùZÛ™Z>´mJÎêç‹–oø3çÕã endstream endobj 14 0 obj << /Type /Font /Subtype /Type3 /Name /F32 /FontMatrix [0.01004 0 0 0.01004 0 0] /FontBBox [ 1 -30 102 75 ] /Resources << /ProcSet [ /PDF /ImageB ] >> /FirstChar 28 /LastChar 122 /Widths 336 0 R /Encoding 337 0 R /CharProcs 338 0 R >> endobj 336 0 obj [62.24 0 93.35 0 0 0 0 0 0 0 0 31.12 0 0 56.01 0 31.12 37.34 31.12 56.01 56.01 56.01 56.01 56.01 56.01 56.01 56.01 56.01 56.01 56.01 0 0 0 0 0 0 0 84.59 79.64 80.91 85.86 73.53 70.42 88.05 87.59 41.72 0 0 67.31 106.26 87.59 84.13 76.53 0 83.56 62.24 77.91 86.09 84.59 0 84.59 0 0 0 0 0 0 87.13 0 54.46 62.24 49.79 62.24 51.11 34.23 56.01 62.24 31.12 0 59.12 31.12 93.35 62.24 56.01 62.24 59.12 45.75 44.19 43.56 62.24 59.12 0 59.12 59.12 49.79 ] endobj 337 0 obj << /Type /Encoding /Differences [28/a28 29/.notdef 30/a30 31/.notdef 39/a39 40/.notdef 42/a42 43/.notdef 44/a44/a45/a46/a47/a48/a49/a50/a51/a52/a53/a54/a55/a56/a57 58/.notdef 65/a65/a66/a67/a68/a69/a70/a71/a72/a73 74/.notdef 76/a76/a77/a78/a79/a80 81/.notdef 82/a82/a83/a84/a85/a86 87/.notdef 88/a88 89/.notdef 95/a95 96/.notdef 97/a97/a98/a99/a100/a101/a102/a103/a104/a105 106/.notdef 107/a107/a108/a109/a110/a111/a112/a113/a114/a115/a116/a117/a118 119/.notdef 120/a120/a121/a122] >> endobj 338 0 obj << /a28 
280 0 R /a30 281 0 R /a39 274 0 R /a42 275 0 R /a44 276 0 R /a45 279 0 R /a46 277 0 R /a47 278 0 R /a48 326 0 R /a49 327 0 R /a50 328 0 R /a51 329 0 R /a52 330 0 R /a53 331 0 R /a54 332 0 R /a55 333 0 R /a56 334 0 R /a57 335 0 R /a65 282 0 R /a66 283 0 R /a67 284 0 R /a68 285 0 R /a69 286 0 R /a70 287 0 R /a71 288 0 R /a72 289 0 R /a73 290 0 R /a76 291 0 R /a77 292 0 R /a78 293 0 R /a79 294 0 R /a80 295 0 R /a82 296 0 R /a83 297 0 R /a84 298 0 R /a85 299 0 R /a86 300 0 R /a88 301 0 R /a95 273 0 R /a97 302 0 R /a98 303 0 R /a99 304 0 R /a100 305 0 R /a101 306 0 R /a102 307 0 R /a103 308 0 R /a104 309 0 R /a105 310 0 R /a107 311 0 R /a108 312 0 R /a109 313 0 R /a110 314 0 R /a111 315 0 R /a112 316 0 R /a113 317 0 R /a114 318 0 R /a115 319 0 R /a116 320 0 R /a117 321 0 R /a118 322 0 R /a120 323 0 R /a121 324 0 R /a122 325 0 R >> endobj 339 0 obj << /Length 189 /Filter /FlateDecode >> stream xÚ1 Â@E°L¡70sÝì ’@°ˆÜBÐÊB„€ZZ( 9ZŽ’#XZ:IV›t«þ 3ïOÌØÄrÄ#²‰xjø¨éBºN%7nt8SjImYǤ–’“²+¾]ï'RézΚTÆ;ÍážlÆ@TðJô ø@ ðhxÁ«jze/¨ š]aöåÙáýÝ;¿íÇÎAdDÉ/ak+ÚÎ?i¶¥”T“‚RSÊ"§…¥ }G«@ endstream endobj 340 0 obj << /Length 188 /Filter /FlateDecode >> stream xÚ1 Â@E¿¤L/ :ÐÍ®A"ˆEŒà‚Vb¥–‚Š‚…EŽ–£äÁÍ$±ÐNxÕÌgæý¡˜1‡qß„l">hº.§!Ǧ^íO”XRÖcR 7'e—|»Þ¤’ÕŒ5©”·šÃÙ”s Î@ t€h~//i¹ÝKxO`L®Ð“tIVãçßxÅ?üÞù¼¨>ö‡©(=C±uÚ•¿/ñ@ªÅRÓr•iniMoEËBs endstream endobj 341 0 obj << /Length 165 /Filter /FlateDecode >> stream xÚ33Ñ3µP0P0WÐ5R²LLR ¹ ¹L @ÐÄ "“œËåäÉ¥®`jÀ¥ïæÒ÷ôU()*MåÒw pV0äÒwQˆ6T0ˆåòtQ`Æ`нLÉI†`’ù˜â‡ˆÙ@¨©˜RŒ)öÈ&U@¤c Œ‚ B•@5@µÃ ƒ µj-\ò²ÑÍ;@¶e¸\=¹¹³+ endstream endobj 342 0 obj << /Length 161 /Filter /FlateDecode >> stream xÚ33Ñ3µP0P0WÐ5R²LLR ¹ ¹L @ÐÄ "“œËåäÉ¥®`jÀ¥ïæÒ÷ôU()*MåÒw pV0äÒwQˆ6T0ˆåòtQxÀJB±SŒ \Å¡˜!’ Ø%¡æý@5¯bÙ–A)~d%P PírÈFC-‚Z+‡ì$¨QL‚z…DK ¾árõä äµd*… endstream endobj 343 0 obj << /Length 103 /Filter /FlateDecode >> stream xÚ33Ñ3µP0P0WÐ5´T2u MR ¹ ¹L @Ð*•œËåäÉ¥®`jÀ¥ï¡`Â¥ïé«PRTšÊ¥ïà¬`È¥ï¢m¨`Ëåé¢PÿÀäÿP *ÈåêÉÈ- +´ endstream endobj 344 0 obj << /Length 109 /Filter /FlateDecode >> stream xÚ32Ö30W0PaCs3…C®B.K ×ĉ'çr9yré‡+Xré{¹ô=}JŠJS¹ôœ 
¹ô]¢  b¹<]dêþ7 ÂzlÐ+”Á Ѫ-õ@>—«'W Êî/ä endstream endobj 345 0 obj << /Length 130 /Filter /FlateDecode >> stream xÚ-ɱ Â0…á gð 2œ'0¹-¥™k3:9ˆ TGAEçæÑòfÚ¢|Ûÿ—ÕÒ7ôlXUÔÀ:ð¢x@='eý;ý m„;P=ÜfÌpqË×ó}…kw+*\Ç£ÒŸ;Zä“Fy2d›åÏd“L*R!s™ÉB¬¹ËY°ŽØã ,P#Œ endstream endobj 346 0 obj << /Length 164 /Filter /FlateDecode >> stream xÚ31Ô35R0P0U02S06W03RH1ä*ä26 (›Ad’s¹œ<¹ôÃŒ ¹ô=€Â\úž¾ %E¥©\úNÎ @Q…h žX.Oæö8qsƒÍ憺Ì ÿê››ÿØnÿÁÿ¸ÿóïý ÿÿ10Øÿ``àÁ 6P $RR ÒÒ 2d>»@nárõä äT¶Dí endstream endobj 347 0 obj << /Length 131 /Filter /FlateDecode >> stream xÚ-É1 Â@EÑ?^á ¦xЙ‰‰mŒà‚V"ÑRPÑ:³´Ù™&Nwo¾\ø’ž%红V\ó¦xA=y1žö:À¨n×w¸°ççý½ÃÕ‡ ®áYé/ ­tò‹½4è’M22ÉD³˜ÉT&2+•<å*ØñBÛ#´ endstream endobj 348 0 obj << /Length 94 /Filter /FlateDecode >> stream xÚ32Ö30W0PaCsK…C®B.K Ïȉ&çr9yré‡+Xré{€O_…’¢ÒT.}§gC.}…hCƒX.O†z†ÿ 0XÏ ÃÀåêÉÈ[\w endstream endobj 349 0 obj << /Length 153 /Filter /FlateDecode >> stream xڅ̽AÅñ ɉ¨ŠóÌ—eëµSH¨"‘ ” ôÍ£xw³ÓN¦ø5çæþgvZ8œ8K¿àÜñbñ€·²–>žÎ7TzOo¡×²C‡ _Ï÷ºÚ.)k̓<j*¥zÑP ¢±‰R˜è.NÑO|[ƧÕmÈÜÏdSéL6•Îeé\6•NdV;üxÔ*Æ endstream endobj 350 0 obj << /Length 101 /Filter /FlateDecode >> stream xÚ32Ö30W0PaCsc3…C®B.K ×ĉ'çr9yré‡+Xré{¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]dêþ7À`=ƒ 1S—«'W fp"¸ endstream endobj 351 0 obj << /Length 140 /Filter /FlateDecode >> stream xÚ32Ö30W0P0WÐ54S0´P06SH1ä*ä24PAS#¨Tr.—“'—~¸‚¡—¾PœKßÓW¡¤¨4•Kß)ÀYÁKßE!ÚPÁ –ËÓEA†¡žá Ö3È0຀`ý™ PÈx€±±¹™¨Ò‚¡€!ËÕ“+ &,• endstream endobj 352 0 obj << /Length 107 /Filter /FlateDecode >> stream xÚ33Ñ3µP0P0U04T03P06TH1ä*ä25 (Ae’s¹œ<¹ôÃLM¸ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. õÿAà˜üÿ‡Îj-Ô\®ž\\~,Ü endstream endobj 353 0 obj << /Length 162 /Filter /FlateDecode >> stream xÚUÌA ‚@à7 ÿÂu ÁÿŽXÓJ0ƒfÔªEBµ ,jímŽâ¼AiÒ"ßæ=xj1›kŽû¤)«%gš/ ÝI¥ÊÆå|£Â<°Ò$7}MÒlùùx]I»'$K>&ŸÈ”ÂGƒÈ½mÞ~¹¼ûi\Ô…ÎáðG8Ô¢x­8ÂM lÏŸj„¨0­ íéb+12 endstream endobj 354 0 obj << /Length 94 /Filter /FlateDecode >> stream xÚMÉ=@PEáþ®â®À¼™x¨ý$^!¡Rˆ ¥‚°{ äTß±4J2:*5¡Å4嬨`ö¢£ÿÆ´"žfšû¹@ò¶ BJJ7"”¼ï몀Ði ‹ endstream endobj 355 0 obj << /Length 165 /Filter /FlateDecode >> stream xÚ32×3³P0PÐ5T06V0²P0µPH1ä*ä2‰(™B¥’s¹œ<¹ôÃj¸ô=€â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. 
Œ Ì Øð107È0°3H0°1X0ð10ð00È0$E@øPôPŸc0nøß`ÿàÿû0\@Œíø€Ìärõä ä;g0÷ endstream endobj 356 0 obj << /Length 351 /Filter /FlateDecode >> stream xÚ5‘ÁJÄ0Eo Xb·6? í ¶Vf`T° AW.DÔ¥ ¢àbÀúeü‘|B—]Æw“6‹Hšóî{-Oæ&7…9,Lylʹyšé7]Tr˜›ò$Ü<¾èu­³[ST:»”cÕWæãýóYgëë33ÓÙ¹¹›™ü^×çÈz@´%[Ä µH~, „p@ìp€/ ±Xb¤VöðÝÈó}§äí“íöòÕ$í—@‡)…»@?° ½§éc˜ŒlSŸT¤_2øz>:)zÉSQ/w9õ’÷•zæ§žýPÏþ¨g¿ÔS@=×Ê "mÃÍ¢"{tSøí_¶‘Û‡£\L:eÍR@5Rl#² L7‘¥^ Zê7û] gOª‘.P²y&#›àMYYê¬.IÅŸ«gÂØÏž¹ýp¤?éËGúTl]úfbÖÒµ¾Ñÿ&¨† endstream endobj 357 0 obj << /Length 172 /Filter /FlateDecode >> stream xÚ31Ó34V0P0bSK…C®B.# ßÄI$çr9yré‡+˜qé{E¹ô=}JŠJS¹ôœ ¹ô]¢*c¹<]ø0Aý? Áøƒ½ýãù† ö@CÿùA2þ€’@5@’±D‚!™dþÀðPI¸ùÌCdþÃÀþƒ¡þÿƒÿÿ “\®ž\\^åˆÓ endstream endobj 358 0 obj << /Length 175 /Filter /FlateDecode >> stream xÚ3±Ð31Q0P0bScSK…C®B.SßÄ1’s¹œ<¹ôÃL ¹ô=€¢\úž¾ %E¥©\úNÎ @Q…h ÊX.Oþ êÿ³ÿg``üÁ~¿ùûÆÿüäØÿÉ?`°gàÿ¤êàÔ õN}`o`üÁÀþ¤›™ÚÔøFÑ¢¢˜ÿ0°ÿÿƒÿÿ? Q\®ž\\à  endstream endobj 359 0 obj << /Length 154 /Filter /FlateDecode >> stream xÚ31Ó34V0P0bSK…C®B.# ßÄI$çr9yré‡+˜qé{E¹ô=}JŠJS¹ôœ ¹ô]¢*c¹<]øÿ0AýÿÆÌذIù~ iÏ"ëÈ?P¨†ñ3õÈÿ@€JR×|Z“ÌÀ0ù Çÿÿ@&¹\=¹¹)“ endstream endobj 360 0 obj << /Length 208 /Filter /FlateDecode >> stream xÚåѱŠÂ@à?¤X˜f!ó·FHÄJð"˜BÐÊâ¸J--îÐÖ|1}_aaËÁu=ÎÒÎe¿Ùýg›Mû]îp,+íqÒçeL?”&Òwš¶¹X¬i˜“™sšË)™|›ßíŠÌpúÉ1™Œ¿$ùMyÆ€vˆ¤Š3|-{Pé½ÓeƒÓ!,¨„GpPghÁºFdPCWTíÓ-”k¦¡Cˆðj( ­g¸f"{¿!ªý—Â[ïÞ—ÿA£œftàùËC endstream endobj 361 0 obj << /Length 235 /Filter /FlateDecode >> stream xÚmÐÁj1à é^=;OÐd-‘õ$¨…îAhO=”‚ÐöX¨ÒÞ„Í£í£ø{ô°˜N"¸Q6>fB&?™Nî'izàmf4Õô™ãáZûÒ||ã¢DõJÆ zâ.ªrM¿»¿/T‹ç%å¨Vô–“~ÇrEP@X×ìû8õ \²²IU{ó˜»ùÁ3ÌbÆYã¥1Ezôè$æ'i=SË©†LÂB„p6Pu Ž–8ç:R†£ ²Ž÷›[4ß9Þ²áéí…ÃŽ&ÎÈ&üZÚú'­ãXήÁÇ_ð%°m¼ endstream endobj 362 0 obj << /Length 209 /Filter /FlateDecode >> stream xÚ•±‚0†0Üâ#pO`Amd3ALd0ÑÉÁ8©£ƒFgúh< ÀÈ@¨…«Ú´_®íÝýýe4fÐÜ,¹ ¹¤kˆ”µÓ„íÅåŽqŠâH2@±5§(Ò½žïŠx¿¦EB§‚3¦ i3 €5C8ZA–›À/:LÊ^ÕÁ­ûpšôXpžÛôkÚF¶­±bIF°Ü2ÕéqžËUœNÐC¨™E>ª_…ñ÷c‹ð+v·d¯ó¯åínÔâ&Å~VŸP endstream endobj 363 0 obj << /Length 260 /Filter /FlateDecode >> stream xڭѱJÄ@à? 
LaZ áæ4‰Üª[-œ'˜BÐÊB¬ÔRPÑÖÌ›ø*¾‰yË+Äuv²g!–Bà#“ÍÌî¿ÎïúnÙñÎ;ÇÎóMG4÷Zly¿›¾\ßÑ¢§æ‚çžš-SÓŸòÓãó-5‹³#Ö÷%_vÜ^Q¿d ˆRPDZT†¸R´öR ÊOÔµ þ@ù*˜(ÞAWEÁ],øR‚º˜IµRê5ú7P­Ñ&?”2oÆ(~#FLØàgÈü5=dF#ïzv¢L;mf–Ä&,—mXJ[°Ìa Þ#å }Rº:%e-vÁvS½•Ô=U:î霾šes– endstream endobj 364 0 obj << /Length 194 /Filter /FlateDecode >> stream xÚ33Ö31V0PaS Ss…C®B.S ßÄI$çr9yré‡+˜špé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þÁõBýc``üßD@.ƒý0ÅÿL1ÿSŒÀÃ?UBÙ7@¨`JJ=SüPêŠýê (<ö¡9ÅñP¯@=ómrüC%h˜ACž  !@ y`> stream xÚuб Â0Ð  ·ô¼/0­ µ‚Dª£ƒ¢³ý4?Å/iLsqˆð’»INÍÆª œ&vª)©9 ¼¢‹åý¶O4¬4Ê©åÊFQê5Ýo3Êj³ ­ioK¨k2ýè D˜ÒÀ€§dFLƤ1’(­C8^Qˆ€„ÉÆDð¹ïɰ|pÃ1ÆÛ½Ó.þ"bøÿyÒ€Œ)™gëºk¸×¿àRã?UŸ’~ endstream endobj 366 0 obj << /Length 166 /Filter /FlateDecode >> stream xÚ35Ñ3R0P0bSCSs…C®B.s ßÄI$çr9yré‡+˜˜sé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þƒÀd’ñƒü†ÿ Œ`’ᘬ“6`R‰äÁAòI68ÉØ€L2`%™‘Hv0)"ÿÿG'!âP5Ⱥ‰ A€J$ãÿ `G@%¹\=¹¹Mÿx× endstream endobj 367 0 obj << /Length 254 /Filter /FlateDecode >> stream xڭѱJÄ@à?l˜&yM"&`µpž` A+ ±:--­7`ákMgé+ä ¼òŠãÖÙÍ& XšæKf’Íì¿]{Üt\ó)p×p{Æ =SŠu¨ÄÎæ‰V=U·ÜvT]j™ªþŠ__Þ©Z]Ÿ³>¯ù®áúžú5ð(ü6S¬ßü`À쑊-Ì— oÕ¶¸áÖë¥d‡ˆ¾¯ I¾Sòý03a‘™LlB".€¿Ñ!1ÍúOx½&ÂpcÄJÂ&ÆHù‹¸£…¸Û…˜„rI)¥ÌÜ” _ò,v0Ÿšõù{lØtéT–‰é¢§úî”Û endstream endobj 368 0 obj << /Length 125 /Filter /FlateDecode >> stream xÚ33Ò3²P0P0bSKSs…C®B.SS ßÄI$çr9yré‡+˜šré{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þÿÿÏøÿÿ?TŠñó bü78) À¤¯s‘)hèb y.WO®@.!»¥7 endstream endobj 369 0 obj << /Length 106 /Filter /FlateDecode >> stream xÚ3²Ô³´T0P0aKSs…C®B.#3 ßÄI$çr9yré‡+™qé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þÿÿ†€ˆ¡¾aècWüÅåêÉÈ3v\‚ endstream endobj 370 0 obj << /Length 165 /Filter /FlateDecode >> stream xÚ31Ò33W0P0VÐ5R0¶T05WH1ä*ä26 (˜ZBd’s¹œ<¹ôÃŒM¹ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. 
öÿÿ?@"äÿ000°ÿâ„=ˆ¨oÿ`#ø?0üoõ ü ä0X0È`a°o`àŠ2°7Ãñÿ qõ \®ž\\ŸÎ`¬ endstream endobj 371 0 obj << /Length 243 /Filter /FlateDecode >> stream xÚ]ÑÍJÃ@ðYrÌ¡¾@ û&A[sjsìɃxj= QôjöÑò(y„=HÇíÌÿДeöDzÌÌ~,¯/•/üUŒeé7~_òG‹8"ÇÝ;¯Οãšó›GÿõùýÆùúéΗœoüKé‹Wn6^DÈÅ8×I êF"!¢:˜+2oa[8˜®7“`¦dÎ`+ØÂÁÔôhLM‹fp ˜&byiguf0«­~5Õ¿jŸþ©RrÀyd* îÕõSkÜ_ Ÿ¨ NÔÇ÷9LÕxoéá ÿádÔÿ™‹„sù¾á-ÿ5Š•P endstream endobj 372 0 obj << /Length 140 /Filter /FlateDecode >> stream xÚ35Ô³T0P0bKSs…C®B.S ßÄI$çr9yré‡+˜˜ré{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þÿÿÿ€™dü€þ3 eR/i& 0È ò‚d“Ì`’LÊ?`üßÀðÿÁ@!¹\=¹¹Afl÷ endstream endobj 373 0 obj << /Length 244 /Filter /FlateDecode >> stream xÚuÑ?kÂPð{<0p² Þ'ð%œÿ€ ur(Ávt°ÔÙ€«ê•]ÝÌGÈè|½¨X#yîøÝ=8. [~›< 8¢€:½û¸Ä°ËµW”ÅÇ|ýÕ”Â.ª1wQÅÏôõ¹ú@ÕjH¯>yoÉà瘣1 ýƒ¸ 8hFãx‡]Ê*ñ›1æ•øá8§¾yºØTBŸ¤,a P³ —À“M õ2Ü< œ fepÒˆ\$ÀIÂÖ5+zÛG4÷V¸Y5D NZ@fWðí¤'c´ÔÒÇýoÊÀQŒü¦Â! endstream endobj 374 0 obj << /Length 243 /Filter /FlateDecode >> stream xÚUпJÄ@ð/.0…ûfŸÀMNÖ?óSge!Vji¡hkRù\AKÁTÖ©$EØuwöŠM1üøf`Šï`¹·<’…Üw£¥>”w%=’Ö.>úÃí­jRWRkRçnKª¾ÏO/÷¤V›SY’ZËëR7T¯¥µ@fµm óÀ¦‡í¼ÅÏ0 à{d¾¦˜üۘÎ=õ4]LÕ3ùȦ€aÒ@b·´liº@ÏT|`Ä“MLjbËÀ¾Å4ŸLõ“ÿ1ÂÄdtFÀœW$®Gœ á*Ã.|ר™±ÕtIÿ6D†c endstream endobj 375 0 obj << /Length 239 /Filter /FlateDecode >> stream xÚ­‘±‚0†Ï8˜ÜÂ#ô^@D'ÔDŒ“::htGáxWÚœmš~éÝßöú_LÂyÒxJsNgoô(ò»ÌéŠIŠîžÂÝ5‡ÑM7ô¸?/è&Ûñ~IŸ¼#¦K¶ Cµ¥ Ô¼*x1F%¨À)dBœÃè ñ‘Š…¬ªA«ÑŸ8çEÅjGîU…Ò(ßNk¼ûÈ4ª,— ~ÐjÔ…}Á<ÛC¿2[|Žþfa?­-ÈÖžÆ3ë ñ“­oŒ×œÈ¾}°]Ñ=ÂUŠ;ü”K‰É endstream endobj 376 0 obj << /Length 167 /Filter /FlateDecode >> stream xÚ35Ó35T0P0bS#Ss…C®B.K ßÄI$çr9yré‡+˜Xré{E¹ô=}JŠJS¹ôœ ¹ô]¢ÆÄryº(ü‚ ê„úÏÀÀø¿,ÊÀ ÿLñSÌ? 
Ô0Åø™adªT Y;ªÑPû ¶CÝuP7ÈÙÿÀÔˆ ƒ™….ĵ˜—«'W ŽK€¿ endstream endobj 377 0 obj << /Length 221 /Filter /FlateDecode >> stream xڕѽ Â0ð–‚ì#x/ i*Uœ ~€ÄIí£ù(}„ŽJãÙK Í"&…äHrÿt¢F*ÄÇ8 q¢0šâYÁ È€f4ãÊé óäžê ×´ 2Ùàãþ¼€œo¨@.ñ 08B²D­uåÐ uf,HW§‚ ô¥lüfëç¬(ºz¥eõ§Ö~ûüæÞ¦Øô§¹_Qš@™ñÍëõ6Ò+L®6ŸñeålóZ¹šÿ«›v,X¿ÕKéP~ï‡ÞEÔºe¯Ö©úN=â’¹«vð™<›Â endstream endobj 378 0 obj << /Length 256 /Filter /FlateDecode >> stream xÚUϱNÄ0 à¿Ê)K¡~h{=îÄB¤ãè€Ó ˆ @°!ZÞ̉èF%Psw ²|Jì8¶ç‹Ãª¦’æt0£ùŒŽŽé®r®^j°¤EµËÜ>¸U㊠ÕKWœkØÍ=?½Ü»buyJz_ÓuEåkÖ?€ÆŒ!òÎf°l#>Ù3ZÎ;@Î'€ç7Àîx ïÉ&Œ&È–Nm9ƒR0—!¡G/aEïFD+E$½ÑŒµ²MX‰¿„^É>a‡-úÆü‘Mˆÿèû=¦×:upÇ´–¤-µiÞ}õèGŒˆA§Š^{s¦ywÖ¸+÷=Ÿ†# endstream endobj 379 0 obj << /Length 150 /Filter /FlateDecode >> stream xÚ3µÔ³4W0P0bSsJ1ä*ä2ñÁ" Fr.—“'—~¸‚©1—¾P”KßÓW¡¤¨4•Kß)ÀYÁKßE!ÚPÁ –ËÓEÁþ?<@£0ÿg`ÇÀøùA ˆbüP¢>€©T*L`¥€)‹`J+ŦF Åþ¿Hʃ‚ârõä äWÎr° endstream endobj 380 0 obj << /Length 191 /Filter /FlateDecode >> stream xÚåÐ= Â@àÑÖBÈ\@7‰¬ÆJðL!he!Vj)¨h«9šGÉ,SˆëlÅ3X,ßòf˜âu¢VsÀmnFlzlº¼ é@ÆH¸¤˜¬w4HH/ØÒ‰I'S>Ï[ÒƒÙCÒ#^†¬(±µÊ>ñl \3X~ZPCAù©J'BEH?4€þ—ºôuâ7{©-'¿ROrï%ËxºVÝ™‹Ã·¹CÙ ï qBszØxaº endstream endobj 381 0 obj << /Length 240 /Filter /FlateDecode >> stream xÚmÐ1jÃ0Æñg1> stream xÚuÑ1KÄ0àW „ãºv8ÈûÚôÎb ç vtrá@ÿ…?'â)ΤC¹ø’£âMHøH^ÂK^Yì/Pá÷æX.°8ÄÛ\<ˆR¡ëÅÑvçæ^,k‘]b©DvJË"«ÏðéñùNdËócÌE¶Â«Õµ¨WhíÀ­í"kÿ·ä@öŒæ¤àmDâ$f~¤#; Hl ¿¥½8@£ÁŠwdFUšì¨%[pù¤^q(é`J7)¯Iˆ’›ÑMk¯T¢äRÙñRI JN%}¤½Ö<=“Dt2l¥IÜ©yÑÑ&ôFš:Uï; ôAš9ÉOŠ} ô5*¡¿­ºÿÄÿ‰°­ ÄœŒE'"'íEÑ<´¾¦®_g'µ¸ßÑÆ©Ñ endstream endobj 383 0 obj << /Length 279 /Filter /FlateDecode >> stream xÚ]ÑAJÄ0àC»…МÀ¦Ç.„Â8‚]ãÊ…êÒ…¢ëöÁ«ô&æuW°ôù’<3‹ôãÑ¿ù».OËÊXSÒZ[svnž ýªIkÂè_<¾èM£ó;šu~žÍyûxÖùfwi oÍ}aìƒn¶¦E„'8p…@ë@Òµ1Ù±=™Ž h¨ $«3,ØÄ+N¼€ÝŠ­‚moƒµÛ³.˜ }0ý颿Q…£’x(`ÜO‡b<¾£âkˆç|ŽÑ4ºPS0á€%»â€ ¢–ƒöàØÞW¾œÌÈCeàË  »ä›PIÂ{Á7™½]øоiՈݱúªÑ·úR}Ý endstream endobj 384 0 obj << /Length 231 /Filter /FlateDecode >> stream xÚÍαJAàYÈÁL›"y÷.p1©b¯L•BAS¦P´Î=’p²2EÈ8»n@ô,†ofgÙ§“ËÉŒK®´¦×WüRÑ+ÕsË8ÆÅó– ¹5×sr·zJ®¹ã÷· ¹Åý5Wä–ü 7©Y²È ð~k%…öÒvìT²Z^{ÓcÝÙ³ ÷ÃâôU«o²CÕ0Ë–*¤ÅSTB¶‹ú`ζÑñÞ&‡í%‹ãE¶Ÿ´§QÒÈ0›b4è3¾Ýe}÷¿Íÿô"Ý_馡}Èl® endstream endobj 385 0 obj << /Length 232 /Filter 
/FlateDecode >> stream xÚUÐ1JÄ@Æñ/¤¼&GØw“@B,ÄuSZYˆ ¨¥ ¢`—-GÙ#liv|ß‹ÜÀü`fÈŸ™iÊ“¶ÖRu«M«Ï•¼K]Ù¼ä”O¯²î¤¸Óº’âÊV¥è®õóãëEŠõÍ…ÚêFïí—é6¢}8rB²G‘š² ç g@þãîp ¬vøÂoûÑðDšD,ZŒN€Çà±E‹Ñ- ®Å-FIâ2vpŽeDZdøÓbt¤½k±Ùt`ÌÜÓÔel6óXÆË"÷ó­üdÁí=yÙ<"ú»ýW.;¹•_µštó endstream endobj 386 0 obj << /Length 204 /Filter /FlateDecode >> stream xÚmÌ; Â@à . ´Vf. ›´1àL!he!Vji¡(X›£å({„”Á8ë£—åø‡ùÝéÅQ—Úš’˜º}Úi<"ÏÈŃ÷f{ÀQ†jÅ{T3ŽQes:Ÿ.{T£Å˜4ª ­5EÌ&¡€º6äü¥…°%/_x÷/PAP02gøýÁ0Ò¦–yp&îî¬dBw›:Œ+0ðÁüâ}¨AT¾yóMÞ6Ó¢5lö–¢.Ë5²Ài†K|¤øT£ endstream endobj 387 0 obj << /Length 198 /Filter /FlateDecode >> stream xÚ31Ó34V0P0RÐ5T01V0µPH1ä*ä21PASKˆLr.—“'—~¸‚‰—¾P˜KßÓW¡¤¨4•Kß)ÀYÁKßE!ÚPÁ –ËÓEùÃT‚D0S$ê00|`ÇÀü¹A¾ù;ÿæ ì˜ÿå˜00þ* àÄ?8Q"êI&êPMÊøbÛ½`Ëßœq ä ã ò Ìê˜þÿ:]þ—«'W ÈckA endstream endobj 388 0 obj << /Length 182 /Filter /FlateDecode >> stream xÚÎA ‚`à'?( ‘œ ”ýüºÌ A­ZD«jXÔ.Ì£yàÒ…Tcu€ßæ 7f: 5ÙðP³™° ø éL¦ %¿—ý‰â”ü MþBbòÓ%_/·#ùñjÆ’&¼•ÎŽÒ„¡ZÀ{ÈUe5ÈTÆ©¬Ö-Õ‡W¨6êÀj@-ÐÉÅóOù¯Ó‰;*`{ú^‰ž[bàTd7“ý w§”§ÍSZÓ»= endstream endobj 389 0 obj << /Length 198 /Filter /FlateDecode >> stream xÚ31Ó34V0P0VÐ5T01Q0µPH1ä*ä21PASKˆLr.—“'—~¸‚‰—¾P˜KßÓW¡¤¨4•Kß)ÀYÁKßE!ÚPÁ –ËÓEÿó‚ÁþT‚zó !ÿHÔ±÷`øÁøþó†ú쀶¤ „|P±=˜i«‡u âÉDª)öph‘<„ÚkrF=ÈAï?0þ`<ÿŸ¡†½ÿ?ƒü?þÿ ì@‡s¹zrroXhI endstream endobj 390 0 obj << /Length 189 /Filter /FlateDecode >> stream xÚ]Î1 Â@Ð\˜B/ 8ÐM²(ÚЦ´²+µT´“èÑr”!åbI qáÁ23ü;èö9änÀ¶ÏvÈû€ÎdC)úlGUgw¤IBfÍ6$3—2™dÁ×Ëí@f²œr@&æm)‰Ú¸·2Ï©\^¡sϵ2¸Î÷¯HÅøQ‰RñþQÖOþø—Ö5ÉQÑJrµìhè M£íÂá„TårL¼@³„Vô½£@ endstream endobj 391 0 obj << /Length 141 /Filter /FlateDecode >> stream xÚ32Õ36W0P0bcSK…C®B.# ÌI$çr9yré‡+Ypé{E¹ô=}JŠJS¹ôœ ¹ô]¢*c¹<]ê˜ÿ70ð|À ßþ€ÁžÿCÿ`ÆÌ00ŠÿÿÿÇäè§3ÿa`¨ÿÿ޹\=¹¹¢&[ endstream endobj 392 0 obj << /Length 237 /Filter /FlateDecode >> stream xÚ¿J1Æ¿00…ñ v^@³9ïäŠÃ…ó·´²+µT´[¸}´> stream xÚ31Ó34V0P0bS …C®B.C ßÄI$çr9yré‡+˜ré{E¹ô=}JŠJS¹ôœ€¢. Ñ@-±\ž. Ì€à?É&™iN‚ìaþ`ÿD~°’È700nà?ÀÀüDþ“ØÀÈä‡$Ù€‚ëÿÿƒÿÿ7 “\®ž\\y endstream endobj 394 0 obj << /Length 122 /Filter /FlateDecode >> stream xÚ32Ö30W0P0aCS3…C®B.C ßÄI$çr9yré‡+Zpé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]˜ø0È@A@ 8~Àüá? 
±q©ŽØ0üÿ‚¸\=¹¹(CE` endstream endobj 395 0 obj << /Length 150 /Filter /FlateDecode >> stream xÚ32Õ36W0PÐ5QÐ54W0´P05SH1ä*ä22 (˜Ãä’s¹œ<¹ôÃŒ ¹ô=€\úž¾ %E¥©\úNÎ @Q…h ®X.OÆ ìø   P?`üÁð†Ø€¸ôE6Œ?êügüðŸ‚üc?PÃ~À†Ÿÿó.WO®@.ÿ§Wõ endstream endobj 396 0 obj << /Length 196 /Filter /FlateDecode >> stream xÚµÍ1 Â@Еir3'p.#˜BÐÊB¬ÔRPQ°ÍÑr±0EÈ:? êdÙ³3ó7èuÂ.{Œô¸òʧãH‰ÆrCqJzÆGz$¯¤Ó1öÇ5éx2`ŸtÂsŸ½¥ […RÊüâë?´LõºæÝ3Ø‚ærÁÊkm‚¨„;xÔÂ3êH†Kv¤Ø@%¯â.êýoÔ nn—**ŒÉù@Ô¦ôDr endstream endobj 397 0 obj << /Length 108 /Filter /FlateDecode >> stream xÚ32Ö30W0P0aCS …C®B.C ßÄI$çr9yré‡+Zpé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]˜?0ü‡!þ ̃±ÿ`øÿÿq¹zrrÆ‚Q. endstream endobj 398 0 obj << /Length 177 /Filter /FlateDecode >> stream xÚ3³Ô3R0Pa3scs…C®B.3 ßÄI$çr9yré‡+˜™pé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]˜?ð`Àðÿƒý†ú@úƒ=ãƒ:†ÿÈ77Ø3ðnà?Î ßÀüÿˆþÇÀDÿa`ÿÁÀNÿ``ÿ€þÀÀþ`Ð O€âÿÿƒÿÿ7ÿÿNs¹zrr#߈ endstream endobj 399 0 obj << /Length 147 /Filter /FlateDecode >> stream xÚ31Ó34V0P0bcs…C®B.C ßÄI$çr9yré‡+˜ré{E¹ô=}JŠJS¹ôœ€¢. Ñ@-±\ž. Ìø?00üÿ`ÿD~°’È70ðnà?ÀÀüDþ“ØÀÈä‡$Ù0½ñÿÿÁÿÿI.WO®@.‡e% endstream endobj 400 0 obj << /Length 188 /Filter /FlateDecode >> stream xÚŽ1‚@E¿¡ ™†#0Ðeƒ6 &na¢•…±RK v9Gá”Tâd)H¬ÌN^fþîþù‘žÌ¦ð”Çš£€Ã9Ÿ5Ý(ŒE”qÑßœ®”R{cRk‘I™ ?îÏ ©l»dM*çƒæàH&g8^W‰S­œQƒdHàVðá•R¾ ò!J*¨- Ài~ nNû/†ooñkg»Íîõ$AéÖHåŠ> éáwlzZÚÑIKÚ endstream endobj 401 0 obj << /Length 196 /Filter /FlateDecode >> stream xÚα Â@ àH†B¡y½ž­uj;:9ˆ“::(ºÚ>Z¥p"ØŠç]qÐQ |CB’?Šû2ä€Ü“1G!‡#ÞI:R°«aøm”d$V$f¶O"›óùtÙ“H–$R^K6”¥ŒÊ¯À¨\ƒ¹UW0÷Â/¼º%>Á«°T¨5*è´4hy~“ÿÌ÷ö²¥ý¦Ýß> stream xÚ31Ö³0R0P0VÐ54S01Q06WH1ä*ä21PASc¨Tr.—“'—~¸‚‰—¾PœKßÓW¡¤¨4•Kß)ÀYÁKßE!ÚPÁ –ËÓEùÃùŒêØ0üa<|€ùÃãìÊð?`0?À€Áþ€> stream xÚ36Ò35R0PacCcs…C®B.# ßÄI$çr9yré‡+Ypé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]ØÈ3üPàÿÃÇþ?nÿÀÿœýó3 ~Äo˜0ÿah`þÁÀ€‚?P³Íüÿÿs¹zrrjÙF„ endstream endobj 404 0 obj << /Length 195 /Filter /FlateDecode >> stream xÚ=αJÄ@à¶X˜fßÀÌ x{›`TñSwÕ‡•Z * Wî£í£ÄÊ6`“"8Î%GŠ™ùÿfŠ|q~ÆK.ø4p¡ó‚½R^j¨çåÔ<> stream xÚ36Ò3²T0P0TÐ5T0²P05TH1ä*ä22 (˜Ad’s¹œ<¹ôÌ̸ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž.  
Ø W á Œ@Ì Äì@,ÿÿ?Ã(f„ÊQ „þ0‚pC sC3ƒ=;ÿ?°f.WO®@.uH– endstream endobj 406 0 obj << /Length 153 /Filter /FlateDecode >> stream xÚ31Ó34V0P0RÐ5T01Q06WH1ä*ä21 ([@d’s¹œ<¹ôÃL ¹ô=€Â\úž¾ %E¥©\úNÎ @Q…h žX.Oæ ìþ`üJò`À‘p’ƒºBþ`°ÀÀðƒ¡üÆçÿì™Iùÿí@’ùÐ.WO®@.1c endstream endobj 407 0 obj << /Length 183 /Filter /FlateDecode >> stream xÚU̱ ‚PÆñ#‘k[çêªWJ'Á rjjˆ ¨Æ†¢¶ˆûh>Š`›Ph—º—jù ÿ¾@ BŸ\ò©ïQà“ÒÎÃ#ŠHE—Äè³l˜dÈ—$"äS•‘g3:Ÿ.{äÉ|Lò”V¹kÌRj×_œ œÒ.Á.X ,g0i)à <¡¥©¡pƒ¶&†®A†=éjœ|c(v‘kØ]þb=ÀÐ(Ô¿áúO¨ÁI† |F£?ê endstream endobj 408 0 obj << /Length 233 /Filter /FlateDecode >> stream xÚUÎ=KÃPÅñs Xx³v(æùzËíËb ­`A' ÖQ|A7©‘|±€Ð~Lïx‡`¼7UÓN?8gù«áá°Ï!ñAÄjÀÝÏ"z$¥ìr·¿~nîh”¼d¥HžÚ™drÆÏO/·$GçcŽHNø*âðš’ WUPñ÷6¾Aß´4æðŠ5¹§q ‘þ" bxØ%âtÇq¿Á_ù®cùGˆÅ²h;²š÷L€ Ëtè5Â<þfúOk…2·|âµÁ+ñ–ZlECÝdÑ ±ï(°ç˜ÂÑIBô¥Y_™ endstream endobj 409 0 obj << /Length 210 /Filter /FlateDecode >> stream xÚMν Â@ ð)(¡«ƒÐ> stream xÚUÎÁjÂ@àYi® Î èn²Zõ$¨sÚSE¨GÁ½‰æÑöQ|„x ‰³²Iéå;üÃüü=ÝF¤(¢N8 ^DúÖ!þ qª¨¯ÝiµÅIŒò‹ôåœs”ñ‚ö¿‡ ÊÉÇ”B”3úI-1žQY¦ãâàAægà//7ˆœŽ4gËZŽvª*Ì 0‰Ã¿˜Š+ã]S‡¸CEÉ@QsüϰFÕì,IqSn/¼'¶’gCþbŸ^m‘mjg`ç1øã'>ÚŸKø endstream endobj 411 0 obj << /Length 183 /Filter /FlateDecode >> stream xÚ%Î1 Â@„á‘@„‡$|'0‰+AA¢‚)­,D¨¥ ¢æQ<‚eŠ`œÅ_ìì·°&î# µÇL_M¬‡H.bìÚ£½ØŸ$I%ب‰$Xp• ]êíz?J¬¦Êu¦[>ÙI:ÓIU•uO§Ã)Fh~ðß!;£ó:còÌÛዬQÖ‘‚ôŸÿ)HÿåpIëH]R·YÀ#õH[¤mé(œ²âl2Oe-?uàC endstream endobj 412 0 obj << /Length 188 /Filter /FlateDecode >> stream xÚµ1 Â@EH!L“#d. 
›ÍºˆBŒ` A+ ±RK EÁBb޶GÉR¦R×l´6¯˜˜ÿþPtÌ+îǬƬ5$Ii;ŒXÜf¢$#±a¥I,ì˜D¶äëåv$‘¬f,I¤¼•í(K~ |[äj¿„W¢‚opGÏà ÀÄ!´—S‹¢E¦ /‹òèzù´ÌO¾6x+Ó¸YÛ~åÕÎÜuдñí…æ­éÂÕ`ú endstream endobj 413 0 obj << /Length 121 /Filter /FlateDecode >> stream xÚ31Ô35R0P0bc3SS…C®B.# ßÄI$çr9yré‡+Ypé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]0001;Ëñÿ ÿaX*6T°ý†úÿÿ?À0—«'W ¾NÚ endstream endobj 414 0 obj << /Length 228 /Filter /FlateDecode >> stream xÚmαJÄ@ÆñoÙ"0M^ป'p÷WóSZYˆ ¨¥ ¢`eòh>JáÊ+ŽŒóé5‚E~°;ÿY²¬šc­té_^iÓèC-/’³Ÿ+9¸’u'éZs–tî·’º }{}”´¾<ÕZÒFoj­n¥Û(Ê-€~‚Ù€8¶#J^ÎQì0CÜc…0áùîÈDÌ_úŸžÓÁïø:ßsöNüaçü™r$_΂[-> ³À,°ˆ, %‡s„'äƒlÏ"³ÈÌñ¥™aAZÒ›M°¿ÈY'Wò TŸc| endstream endobj 415 0 obj << /Length 235 /Filter /FlateDecode >> stream xÚuÐ1NÄ0ЉRXšß`3', ZiY$R AE¨€ ´ØGóQr„”[¬0¼„‰"OÊŒóÇ“ãîÈ/¥•^—ÒŸ‰÷òØñ+÷ÅVüɾóðÌëÝ­ôžÝ%Êì†+yûxb·¾>—ŽÝFî:iïyØ™-­2È9QµµÕ EëPõE6‚f¤LÍôV»&‘ÆàðÌÔb&e6‚€§Ñf“õÕŽó‘òY (yâ/ifU ý°Å_ cBüÔ¨M>Õ‹ý‚¸Ÿ™°y¥ÿ€‚޵¸2_ |ÃßÇ›jh endstream endobj 416 0 obj << /Length 188 /Filter /FlateDecode >> stream xڕν Â@ ð+ At-(˜'ð®¶µkotr¡P?ÁQðÅ_ÄÇè èý‹­³ù‘äIàõÃ+FŠÃ!¯=Ú“™º,ñ‘o)Ñ$ìG$'¦KROùt8oH&³{$S^z¬V¤SBĢ⊠ØÀ©iƒèA«äf°1ë€h‚.p;»Áö`¯Z  \2ðoóŠß›ÿÂy™³54Ö4§òý`ö endstream endobj 417 0 obj << /Length 226 /Filter /FlateDecode >> stream xÚ•Ï¿jAðïnaÜ ˆÎ ˜½s=b!j W¦J!‚`R ìnÍG¹G°´8ÜÌœEH:›_1;ödÏyŸSp¯ÏnÈyΟíÉ9)¦œ¿Ü_6[šd?Ø9²oR&[Ìùð}ü";YL9#;ãeÆéŠŠÇÀŒÇæÒºÂ„ÐpQ*Å+j .+xsº7á”xÄ•‘Íç–Üð‘\ƒ }µrÓþ† ”¿ø´•R þ/:tK­¬uéîNTc¨'Û¼‰Ä'ò¡jìiT”2ƒ®D¥×‚Þé+XÑ endstream endobj 418 0 obj << /Length 243 /Filter /FlateDecode >> stream xÚm½JÄ@…OØ"p›¼ÁÎ}d³ƒÚXW0… •… j)¨hëäÑò(ó)S„ÏD…m>†{çüÜuuìVZj­G+­ÏÔ9}ªäMjÇa©îägóø"›VìÖNìÇbÛkýxÿ|»¹¹ÐJìVï+-¤Ý*Ðô@ P„sŽºø‚&¾³¾[ D>#E@ƒ¢Ç†r˜Iõ~2û> stream xڕα Â@ àHÁB}Ѽ€Þ]õ¤“…ª`A'uª(¸ÙGóQî|ƒšTZèàà‘û†?$w#3°i²ÔhdÈŽéhð‚CË!Çá·s8cœ ÚÐТZpŒ*YÒíz?¡ŠWS2¨f´5¤w˜ÌHŸP˜Qžç®ÎëY’ 4aÐ:B@à ¸Ç8 ‚—1¾ìn -¡SQ¼üRá-8­ð d“_Ñ®Ó+ÈJ¢_<ÿ!’¯tùâ<Á5~lúQ- endstream endobj 420 0 obj << /Length 265 /Filter /FlateDecode >> stream xÚMÁJÃ@Eo˜ÅÀ[8мÐ$A„ÒB­`B]¹WêÒ…¢ÐEÁù´ù” ;#Ç›*ÖÍyóî{wæÎquÔLµÔZ§ZŸjÓè}%OR7KmN~&w²l¥¸Öº‘₲í¥¾<¿>H±\Ÿi%ÅJo*-o¥])L OÄ[ À`;d1ëa¶°3X`LpÀM6{ä{xÖSÏœ˜°Hpžî|tO¥0£1l¹6Ì ùi4ÈþÓ,ìÀe3zŸÓáw™gRÒô¦SÅß@v伕+ùÿcå endstream 
endobj 421 0 obj << /Length 237 /Filter /FlateDecode >> stream xÚuÏ1NÄ0бRDšÆ@ò\œlÖBT––E"Tˆ ¶¤AKr®â›ì!eŠ3³ ˆšgiÿ_×'aE5t¼¢æŒB ÇŸ± 2¬(œÎ_žpÓ¢¿¥& ¿”1úöŠ^_Þvè7×çT£ßÒ]MÕ=¶[‚b—….'0SÉ2*(ÙŒ`&p ÞÁõBì!Ît ç¼àÒð_èÝ_èR¥c§Ø™%Éž 6{6Cñ!I¬cˆ“Ä)A×ô?€Ö«ÌÁ“ôXZ1IÁØËN+éOVë”ùÀäqY‰-Þàú m9 endstream endobj 9 0 obj << /Type /Font /Subtype /Type3 /Name /F15 /FontMatrix [0.01204 0 0 0.01204 0 0] /FontBBox [ -4 -21 83 62 ] /Resources << /ProcSet [ /PDF /ImageB ] >> /FirstChar 16 /LastChar 122 /Widths 422 0 R /Encoding 423 0 R /CharProcs 424 0 R >> endobj 422 0 obj [27.68 27.68 0 0 0 0 0 0 0 0 0 48.44 46.13 46.13 69.2 0 0 23.07 0 0 0 69.2 0 23.07 32.29 32.29 41.52 0 23.07 27.68 23.07 41.52 41.52 41.52 41.52 41.52 41.52 41.52 41.52 41.52 41.52 41.52 23.07 23.07 64.58 64.58 64.58 0 0 62.28 58.82 59.97 63.43 56.51 54.2 65.16 62.28 29.99 42.67 64.58 51.9 76.12 62.28 64.58 56.51 0 61.12 46.13 59.97 62.28 62.28 85.34 62.28 62.28 50.74 0 0 0 0 64.58 0 41.52 46.13 36.91 46.13 36.91 25.37 41.52 46.13 23.07 25.37 43.82 23.07 69.2 46.13 41.52 46.13 43.82 32.52 32.75 32.29 46.13 43.82 59.97 43.82 43.82 36.91 ] endobj 423 0 obj << /Type /Encoding /Differences [16/a16/a17 18/.notdef 27/a27/a28/a29/a30 31/.notdef 33/a33 34/.notdef 37/a37 38/.notdef 39/a39/a40/a41/a42 43/.notdef 44/a44/a45/a46/a47/a48/a49/a50/a51/a52/a53/a54/a55/a56/a57/a58/a59/a60/a61/a62 63/.notdef 65/a65/a66/a67/a68/a69/a70/a71/a72/a73/a74/a75/a76/a77/a78/a79/a80 81/.notdef 82/a82/a83/a84/a85/a86/a87/a88/a89/a90 91/.notdef 95/a95 96/.notdef 97/a97/a98/a99/a100/a101/a102/a103/a104/a105/a106/a107/a108/a109/a110/a111/a112/a113/a114/a115/a116/a117/a118/a119/a120/a121/a122] >> endobj 424 0 obj << /a16 355 0 R /a17 353 0 R /a27 358 0 R /a28 357 0 R /a29 359 0 R /a30 360 0 R /a33 344 0 R /a37 356 0 R /a39 345 0 R /a40 339 0 R /a41 340 0 R /a42 346 0 R /a44 347 0 R /a45 354 0 R /a46 348 0 R /a47 349 0 R /a48 412 0 R /a49 413 0 R /a50 414 0 R /a51 415 0 R /a52 416 0 R /a53 417 0 R /a54 418 0 R /a55 419 0 R /a56 420 0 R /a57 421 0 R /a58 350 0 R /a59 
351 0 R /a60 341 0 R /a61 352 0 R /a62 342 0 R /a65 361 0 R /a66 362 0 R /a67 363 0 R /a68 364 0 R /a69 365 0 R /a70 366 0 R /a71 367 0 R /a72 368 0 R /a73 369 0 R /a74 370 0 R /a75 371 0 R /a76 372 0 R /a77 373 0 R /a78 374 0 R /a79 375 0 R /a80 376 0 R /a82 377 0 R /a83 378 0 R /a84 379 0 R /a85 380 0 R /a86 381 0 R /a87 382 0 R /a88 383 0 R /a89 384 0 R /a90 385 0 R /a95 343 0 R /a97 386 0 R /a98 387 0 R /a99 388 0 R /a100 389 0 R /a101 390 0 R /a102 391 0 R /a103 392 0 R /a104 393 0 R /a105 394 0 R /a106 395 0 R /a107 396 0 R /a108 397 0 R /a109 398 0 R /a110 399 0 R /a111 400 0 R /a112 401 0 R /a113 402 0 R /a114 403 0 R /a115 404 0 R /a116 405 0 R /a117 406 0 R /a118 407 0 R /a119 408 0 R /a120 409 0 R /a121 410 0 R /a122 411 0 R >> endobj 425 0 obj << /Length 327 /Filter /FlateDecode >> stream xÚ•Ó¿j„0Àq%C ‹`ž *½B]®W¨C¡:”NmÇ-ív¨–GÉ#dt—&æ—?RiDø¨ ~ýi]_\V´¤;½×WôzGß*òIê’šMš ¯dß‘â‰Ö%)îôYRt÷ôûëçû‡Z‘â@Ÿõm^Hw ‰YmVìaܶb«Nß4RbÕXM›Î”\u®N›n•ònbÁý |ä± –mˆœbçÞ©¶‹LEæ´]$â±±7æ!3äi»ÈlŒzçÚ.2Ob'Þzº>¸Ñƒtî!ò¸´—Æ9™7Ê ×˜CîÒ.Ík&) 7L³Èʬ ¦k–üÓùì“ËõÁóÇ Á͹!¾·!×Kk¹KÛøÌ!×#°€Ü¥m<æá“ÆÌþçÎFkó(­°¿4J@?û¯ÉmGÉ/ðc ¥ endstream endobj 426 0 obj << /Length 105 /Filter /FlateDecode >> stream xÚ3±Ð31Q0P0bS #…C®B.C ßÄI$çr9yré‡+˜ré{E¹ô=}JŠJS¹ôœ€¢. Ñ@-±\ž. 
ÿA ÉÀþÿÃ(9THü±ÉåêÉÈ’:Õ° endstream endobj 427 0 obj << /Length 209 /Filter /FlateDecode >> stream xÚ³°Ô³0U0P0b c #…C®B.s ßÄI$çr9yré‡+˜[pé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þƒÁëÿ8ëœõ¿ÎJóƒûÿ ,fn0‹¤ªÿcÙ5CXòÿ@Y ÂbGb}ÀÂúe1ceý¡ Ÿ½ìH,ln~÷å Ÿ#BBðPŒº`pÎb€±~ÀY 0SFYä± I—«'W TÛ4# endstream endobj 428 0 obj << /Length 346 /Filter /FlateDecode >> stream xÚ}ѱJÄ@à?¤l“v_@“pÞ] !pž` A+ ±RK E;!÷hñMÎ7H¹à’qfwO ¦ù`vv23»œ•µ)ÍÒVf±0õÌÜWêIÍ%Xšú8œÜ=ªU«Šk3¯UqÎaU´æåùõA«ËSS©bmn*SÞªvm€| 82"‡7@бï, }8$´þtHIR2>JØÜJ =°MT;4[6ÿ±ùR׳éÄÄ~“û íD©Ï}~k£.:Âíì£6ʃH«¬Ï±¥DÎJ†wðkñ©8ÊÌ1ÁÛ‡=Iszÿ‚‰6üÑWÎBðJIľ7ìl¢:šÇa²hJ½Ý7ùCÞ¦ûßÍ8‘ÂýðˆþÝÆðâÞ5,φýkV›Ôqœ<ò Òöè÷Ã/™„µXY×dã|…ËvRJµêJ}áI± endstream endobj 429 0 obj << /Length 270 /Filter /FlateDecode >> stream xÚ•‘±JÄ@†'¤Ls°óšL® œ'˜BÐÊB¬> stream xÚÝ‘=NÃ@FÇJišÁsX[NŒ©"åGÂTPR€ ¶;®•ä 9BJGZí0;Þ J¨Øêifw<~ßEqžU”QAg9•—Tô˜ã –)fTûÎÃ3Îj4wTNÐ\IM}Mo¯ïOhf7sÊÑ,h•Svõ‚`Úæ_À ühv= ™{H™× ³ïñž¡±ÁBÊ [rë¡%k‰TïË3¶ü·š.‚ 0=€;  ý Ú¿€“ûv>ò;ö»ÕbC _Æ\”Éõ¶Aøf #àc§ƒ—è,'·4/+;h‚¼q1h¸¬ñ?7p% endstream endobj 431 0 obj << /Length 243 /Filter /FlateDecode >> stream xÚµ±NÃ0†/ê`é?BîÀ‰dSº`©‰ HeꀘhÇ XI-Â#dÌ`å¸s‚ºtÅËgý÷Û¿î·×~Iyºª)x ö5¾£_‰XQ¸™&oG\7èväWèEF×<ÑçÇ×Ýz{O5º ½ÔT½b³!€ÿ€œÈ£‚™Oª±ª–!2J`@;€÷PŽPÈ<²;…‘GgÈ3E9c̈¹*lÊ0´9Útüø / Îà Ýìi†Õnʲm'¾©¿;)¤ø–),åˆbÈߘ^‹ìJq™©Ý‚§®£zµlÑð¡ÁgüÍF‹¾ endstream endobj 432 0 obj << /Length 253 /Filter /FlateDecode >> stream xÚÕÒ½NÃ0ðT"ÝâGȽu¢~n–ú!‘ &ÄŒ ˜Ý7è+õQúíØ!ÊŸ³¯ñ‚ŠÄ„ˆdå—‹³ÿÊl4¬æ\ñ˜¯jžU<ñsMo4HQÇúæé• Ù{žNÈ^K™lsÃïŸ/d·K®É®ø¡æê‘šgáʱ‰wƒ_ s=Ìÿ‡$ p8E €.¢° (±s‡×…¢ÀŸÂ4Ž2ì¥*ȱÓ| ]¹Ñ6&âÜ´LèÎpßàÚ‹À_à‡ýøËÇIHGN!ÄXÊ>±] ³7ž#†Ýfæýß".ŒÎF«?«Ç^Q 3Ò™Ö Ýщb= endstream endobj 433 0 obj << /Length 244 /Filter /FlateDecode >> stream xÚ…¿J1‡gÙ"0M!óº·`D«Ày‚[ZYˆ•ZZ(Úºy´}”<•aÇ™¹ãôP1|ðå—?üâéáIO :¢ƒžâ1ÅH=>cT¹Pc;÷O¸°»¡Øcw!»á’^_Þ±[^‘ØÝÊ™;Và8ƒŒ‘?dm˜gPÇj·\R…q :“dÄ„*Á |…Vbn¶;ƒg³Eó çd˜ö1Öo( Ø÷aãhDBÿcü³!ýD[Áo˜¬1¿En¥ ¹±¦ä%iêÝînª6N:ó\ÒZÛ` æ]H›_ÙI<ð?yë­œ endstream endobj 434 0 obj << /Length 175 /Filter /FlateDecode >> stream xÚÕн Â0àá–>Bï L*)¸j3:9ˆ“vtPtnÍGé#8fœ—:èÒM‡|ä~àŽ3z> stream xÚ36Ó35Q0Pacc …C®B.# ßÄI$çr9yré‡+Ypé{E¹ô=}JŠJS¹ôœ ¹ô]¢  
b¹<]ìþ``üÿ€ùÿ0fÿÿ+†ÉƒÔ‚ô€õ’ ä0üÿ‰˜aˆàÿÿÿ@Ç\®ž\\ÍÙ¥; endstream endobj 436 0 obj << /Length 107 /Filter /FlateDecode >> stream xÚ36Ó35Q0Pac c…C®B.#K ßÄI$çr9yré‡+Yré{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]ì0üÿ‰™˜aãÄÿ„޹\=¹¹µ‰Ã endstream endobj 437 0 obj << /Length 232 /Filter /FlateDecode >> stream xÚíÒ½jAð WÓÜ#Ü>·ÔŒ‚WZ¥©LÊ+³vrp!E¶›üçT°+‹ ó›Ý-ÆÙÇvïÞXÓÅqöÁt;æÍñ';ë±j-->x˜súŒÇéiNó©Y-×ïœgOÙ‘yÁÌ+ç#CYEI ºO$RáxŠ%4ˆDJʤnï«Ò 󢣨Ò×®U¶¤ Hª@Yûƒ$߸»Np·â§¤D@¥(€þ¿ØAx^ƒæ §¨å9ìÅE…ÿÇÍÛ„ÂÆip xœóœÿvÚiC endstream endobj 438 0 obj << /Length 184 /Filter /FlateDecode >> stream xÚíѱ‚@ à& &]xúÞÜHLtr0Nêè ÑUy´{ጃ „zwÀ¡Í×6ÿÔd4”’™JBG´ñ„qlfiG{Ø1+P¬)ŽQÌÍE± Ëùz@‘-§¢Èi’Üb‘¤‚˜µ©ÒÁc®|æÚ!P÷Æái à±®!`{èø.ÿT¼ÊV6ß¡ýAÓõ_°yÍÀ4Õ8+p…o âøš endstream endobj 439 0 obj << /Length 231 /Filter /FlateDecode >> stream xÚµ‘±‚0†kHná¼Ђ±0’ &2˜èä`œÔÑA£3<šÂ#02Î^KL%!_sý{½þ¬æI‚!.qa¼@¥ðÁCT±Ý9ß +@P% 7º ²Øâóñº‚Ìv+Œ@æxŒ0> stream xÚÕÏ;Â0 ÐtõÒ#Ô' ’VbªTŠD$˜02€`nÆQz„T d¨jœ20õXö“üYœé™žcŠš+ã4xRp“s?¶aq¼@iAîÐä W<i×x¿=Î ËÍÈ ÷ ÓØ Eá¢^¹˜6¡–­É±Câ‰:_øˆ:WóÑ«}ßÍO_ /h‰ Æmƒú ýIž™–¶ðj^¤ï endstream endobj 441 0 obj << /Length 259 /Filter /FlateDecode >> stream xÚ]Ð1NÃ@Ð¥°4¾;ÛŠBƒ¥$\ ‘ŠQ%Ú¬æ£ì\¦°v˜Y)¢yÒî·çÝT—ëk.¹æ‹Šë57 ¿UôIõJ/Kn®æäõƒ6O\¯¨¸×k*ºþþúy§bóxË[~®¸|¡nËXÊp8™ÎÙë…HDÑFä#ò°Ô々Ú~Àþ¨¨7ö'ÉQÈ”´^;LKZ+45qj@.dêtÜÇv“ù!¤¸Ç"iíÐÄÌôehÖ”ôÁjÛ]ˆÿdVçµ³½ÍSuž‡è ±ýõ?h©›ÓêgåcfKxýºëhG¿Á•¡Z endstream endobj 442 0 obj << /Length 186 /Filter /FlateDecode >> stream xÚ35Ô34S0P0RÐ5T01Q07SH1ä*ä21 (˜›Cd’s¹œ<¹ôÃL ¹ô=€Â\úž¾ %E¥©\úNÎ @Q…h žX.O†ÀOþÁN2bÌH$;É&åÁ¤=˜¬“ÿA$3˜äÿÿÿÿ?†ÿ8H¨úANò7PJÊÃç‚”ÿÇ`$ÿƒHþÿ ÀØ`ÿð(Èþßÿ ýß E` q¹zrr:é“p endstream endobj 443 0 obj << /Length 187 /Filter /FlateDecode >> stream xÚíÑ1 Â@Ð  Óä™ èfÑlì1‚[ZYˆ•ZZ(ZÇÎkÙyÛt¦Ž»‰… а{üáÃÀ»°O!õ¨­(Võh¥p‹ZÛ0¤(j.Ë ¦匴F9²1J3¦ýî°F™N¤Pf4W.ÐdI àñ˜Kü#ZX€ƒøã+üÏÞ8ä¯È’ àö„wåÂ6î .n ŸÁÉÁNÃõ<sUÃv‹öÁ848Å”Ìðn endstream endobj 444 0 obj << /Length 310 /Filter /FlateDecode >> stream xÚ…Ð1NÃ@б\XÚÆGð\œ8ÁM,… á * D” è"ÖT¹–o+ølé"ò0³³DQXOš]yþþòôx:ÁNð¨˜bYâÉÆæÙ”OG8›…£û'³¨M~ƒeaò ž›¼¾Ä×—·G“/®Îplò%ÞŽqtgê%Qmÿ3¢ "Vì–åÏŠ<³Ÿ³•èXú1f3j îÔ„MÅVl!e±y‹ ºo+ 
=̃ï¬Zy·Çê½ÃÎÈ[‘ÄcoFG\{SZ·êƛЦQ?ƒä‰`߈†µ™=mÿ»•;4ëMÛ?l½þœ};Y«íTj¶Ä­õj´Ó©Ú õIP×Z§ël§klku釾2#}UJ.´Ò†RÌym®Íaɽï endstream endobj 445 0 obj << /Length 137 /Filter /FlateDecode >> stream xÚ33Õ37W0P04¦æ æ )†\…\&f  ,“œËåäÉ¥®`bÆ¥ïæÒ÷ôU()*MåÒw pV0äÒwQˆ6T0ˆåòtQ```c;0ùD0ƒI~0Y"ÙÿIæÿ ò?&ù¤æDå(I²ôÿÿà"¹\=¹¹VI¢” endstream endobj 446 0 obj << /Length 301 /Filter /FlateDecode >> stream xÚ}ÑMJÅ0à)Y²é’Ø–G_]x>Á.]¹WêÒ…¢ëôh=JŽe¥ãüˆ? Ú¯if¦“tߟ ChÞ¯6 §á±s/®ßÑ\¦¼ððì£knC¿sÍ%½uÍxÞ^ߟ\s¸>kŽá® í½Ào@£B,D¸'€DdZš"-š,-ÚB/6¨3"x‰š¢äç”™œ®—ÓÊ®k‰í ƒËpÞ7q|Ì$pãFúæš¿È »ùdíL™@ÚAvüZ´H¥ÙFÓ¬¦YM«5Þk|,ZdÖìI³eb4Ðj`Môä³g!@Tt¶«`[ÈBÍ».àA8ã²EþõËwÌ•b«ÔŠW¢’üÉü'îbt7î}tû” endstream endobj 447 0 obj << /Length 305 /Filter /FlateDecode >> stream xÚ‘½N„@LJlA² À¼€ÅgErž‰&ZY+µ´ÐhÍ=Ú> @IA烋 á·ì|ýgf.ëK xQá®Âz¯•ÿð!ðe‰õ•Y^Þý¡õÅ#†à‹[¾öE{‡_Ÿßo¾8Ü_cå‹#>UX>ûöˆ)Eà§£‰¿ŽˆN£ÈGG#›"ˆqhfHøÔ8¾ÏéäfEÊAEIÅÈ=¿ÿ„Å-ˆÎ’%$©#쵂H\ÀÕWèfä¹  Íhg™…™cgݺi†¹8iZþG«`©s+´¤É,25×ô\iÜ`2[Ì[¸¨ÈE3)Dä/ˆþbZÁ1.8Gƒ ƒ•I¬³éUuužR¯áÍ:îXÔ&¼oÝ´í]Ö¯"MºÎÝß´þÁÿéýëo endstream endobj 8 0 obj << /Type /Font /Subtype /Type3 /Name /F31 /FontMatrix [0.00836 0 0 0.00836 0 0] /FontBBox [ 2 -24 107 84 ] /Resources << /ProcSet [ /PDF /ImageB ] >> /FirstChar 49 /LastChar 121 /Widths 448 0 R /Encoding 449 0 R /CharProcs 450 0 R >> endobj 448 0 obj [65.77 65.77 65.77 0 0 0 0 0 0 0 0 0 0 0 0 0 99.31 0 0 0 0 0 0 0 48.44 0 0 0 0 0 0 89.85 0 0 73.08 0 0 0 0 0 0 0 0 0 0 0 0 0 65.77 73.08 58.47 73.08 59.81 40.2 0 0 36.54 0 0 36.54 109.62 73.08 65.77 0 0 53.39 51.89 51.16 73.08 0 0 0 69.43 ] endobj 449 0 obj << /Type /Encoding /Differences [49/a49/a50/a51 52/.notdef 65/a65 66/.notdef 73/a73 74/.notdef 80/a80 81/.notdef 83/a83 84/.notdef 97/a97/a98/a99/a100/a101/a102 103/.notdef 105/a105 106/.notdef 108/a108/a109/a110/a111 112/.notdef 114/a114/a115/a116/a117 118/.notdef 121/a121] >> endobj 450 0 obj << /a49 445 0 R /a50 446 0 R /a51 447 0 R /a65 425 0 R /a73 426 0 R /a80 427 0 R /a83 428 0 R /a97 429 0 R /a98 430 0 R /a99 431 0 R /a100 432 0 R /a101 433 0 R /a102 434 0 R /a105 435 0 R /a108 436 0 R /a109 437 0 
R /a110 438 0 R /a111 439 0 R /a114 440 0 R /a115 441 0 R /a116 442 0 R /a117 443 0 R /a121 444 0 R >> endobj 451 0 obj << /Length 126 /Filter /FlateDecode >> stream xÚ-ÉÏ Q…ñ£)ê$O`qžÀý—É-6c”»P¬,¤– ±å¾ùÑ·û}ÞBU)åJ½NŽ7ÆÖ¬|øã…U¢Ù*Ò,[¥I+=îÏ3MµžËÑÔÚ9Ù=S­)^h¾å&¿ó,wþ¹@º !&¸â.7ü­è endstream endobj 452 0 obj << /Length 128 /Filter /FlateDecode >> stream xÚ-ÉA A†áOSÔ—ü‡ï03;Ùv‹ËZeŠ“ƒ”ÂQ!®Ì?_«¶÷ö¼™Ÿ† §\?“ÏUèâù`ÙšSºq¾±Š´{•´ë–iãF¯çûJ[m—ò´µ^îÈXkŽš©Iß´H½.“ èc„!Æ(pÇ \EîøÀ—' endstream endobj 453 0 obj << /Length 90 /Filter /FlateDecode >> stream xÚ32Ô36V0P0bCS …C®B. Ïȉ&çr9yré‡+Xpé{€O_…’¢ÒT.}§gC.}…hCƒX.O›ºÿ@PgÃåêÉÈj* endstream endobj 454 0 obj << /Length 137 /Filter /FlateDecode >> stream xÚ32Ô36V0P0SÐ54U04S06RH1ä*ä² (˜˜Ce’s¹œ<¹ôÃ,¹ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. 6 u ÿ¡°ŽÁ†€«oøßPß`ÛÀ…Ì Ì Ì ì l | < 2   \®ž\\ Í"Ð endstream endobj 455 0 obj << /Length 94 /Filter /FlateDecode >> stream xÚ32Õ3S0P0T04Q02R0´TH1ä*ä22PASˆDr.—“'—~¸‚‘—¾‡‚)—¾§¯BIQi*—¾S€³‚!—¾‹B´¡‚A,—§‹Âÿÿ—«'W µ?  endstream endobj 456 0 obj << /Length 195 /Filter /FlateDecode >> stream xÚÐ=‚P àGHºx{ð“8‘ &¾ÁD'㤎ñh…#02°?$8BàKÚ÷Ú’&ñ*Zc€} ½!ÞBxBœR,!\[ðO§àï( ¾Ýãûõ¹ƒŸ6Hqçƒ ØzŒak±5ngÜÞ8eÅòËf»Tkv¡63¬§*íIÏRúË,êÐôF¬9ãuÓéx_kÿçzj˺j7ÃvªRµ›þa&–ïaôãfØÊÀÖÂ~;2Š endstream endobj 457 0 obj << /Length 190 /Filter /FlateDecode >> stream xÚ3µÐ33S0P0bSSSC…C®B.SßÄ1’s¹œ<¹ôÃL¸ô=€¢\úž¾ %E¥©\úNÎ @¾‹B´¡‚A,—§‹Â h``Q Ô?Æ Œÿ€Lû`Šÿ˜bÿ¦˜€)Æ?` „AT„ª‡Pö È”ü,ÐPÜÔªPØŒ–€ÍI(εGñ Ôß2þ@ h¸C f °Q? 0t¹\=¹¹¢AƒÅ endstream endobj 458 0 obj << /Length 242 /Filter /FlateDecode >> stream xÚ¥Ð1jÃ@Ð/\_@à¹@"Éɪ¢ÂT)Bª$eŠ»HGóQ|•*Œå™YÙÆi³°¼e–eù?Kîf'üÀ·3v»{þLé‡\*Äó7ß4/)~e—Rü$cŠË¯7_ÏŸY¦ ~“7ïT.0PôM$ôý.ÒYÐåÀ¤E^Ó-nö@Ý r½G 'Ùr„Î[¡Âdw!××g"Q+ã¿ÀèaëÿŒ$S=ÐœÐØŠd(¶Æô Í ´FèÑ´Cv!ð…ØO—–´³ÊŒrû¬óí¶¾kmA§-Kz¡#ÉÎn endstream endobj 459 0 obj << /Length 145 /Filter /FlateDecode >> stream xÚ35Õ31S0P0bS#SC…C®B. 
ßÄI$çr9yré‡+˜Xpé{E¹ô=}JŠJS¹ôœ€|…hCƒX.O…úÿ@ð…l`ÿÀø¯†ýƒ]ˆ”+‘|2D²=‘ìÈäÉŒŸd‘ C‡äÿ$äÿÿÿƒBr¹zrrÙâbÚ endstream endobj 460 0 obj << /Length 188 /Filter /FlateDecode >> stream xÚmË= Â@†áO,„ira纆P hSZYˆ•ZZ(Zg;¯µÞ$Þ@ûà:ñ§xŠwfLÜŽ¸Ã†[!›˜MÄ›ödª(¹ûÞ¬w4ÌH/dOz"™t6åãá´%=œ8$ò2äΊ²”‘[ îo@`K ‰È ±Ê ûÒ(  à5/¨ .P/¥·êà ¯ßüEuì÷ø%øà¡ —£W¸ÄîïV]®Þ6¼d<Ð3šÓSþQ endstream endobj 461 0 obj << /Length 186 /Filter /FlateDecode >> stream xÚ31Ò33S0P0RÐ5T01P05RH1ä*ä26 ¹Æ™ä\.'O.ýpcs.} 0—¾§¯BIQi*—¾S€³‚!—¾‹B´¡‚A,—§‹ó†ÿ¨;ˆ`¦aß$þ?``þ|àów†:æ ö Ìä˜0ðªì@ÄÁ &~LÀ €˜÷b<Ø"°•o@–Ÿg¨a`nn:ˆè4†z.WO®@.†dˆ endstream endobj 462 0 obj << /Length 174 /Filter /FlateDecode >> stream xÚͱ Â0Ð[:ÞÒÕ­ï4ISÁNZÁ‚N⤎‚Š®ÚO˧ô2:5EÜÎrß½<“´aÅ5›ŒMÎ{MgÊŠ*6ãïew¤²&¹æ¬ 91ÉzÁ×Ëí@²\NY“¬x£Ym©®â Ø7Ð y´q{á„O‚´Þö·½{ðúÓocm0HLR•8œD~ùÈ¡KØÐ4«iEì+9 endstream endobj 463 0 obj << /Length 187 /Filter /FlateDecode >> stream xڥα‚@ à’.¼öô8Û%ˆ‰7˜èä`œÔÑA£+ðfø(<£ÃE¼‹Hâhú mÓ¿Q8M HЄSRÒãEjšÅâ=ÙŸ0SÈ6$Rd ÓF¦–t½ÜŽÈ²ÕŒ8²œ¶œ‚ªœ@šò»/ „~ð/^kÚ } NëÖà5Nþ:W&È ífOi),ÏŸ)†åpï“Q¾#mxå¶^­á1’Ú¾¦Áo$à\á_X¿Eq endstream endobj 464 0 obj << /Length 180 /Filter /FlateDecode >> stream xÚ]Ë1‚@…áG(H¦ÙÖŽ¹€. 
h¤"AL¤0ÑÊÂX©¥‰m£q”=–\”ÊâkþyÅ5ã€C+ŽBŽb>)ºQ˜Øp4ý]ŽÊ ’;’+›Ik~ÜŸg’ÙfÁŠdÎ{ÅÁŠœ!4P½W ׈ÚÓ¢§ýÚ홾¦pLçUÚA×u_-€¥U R`ÔókÌE@4¸zÚ1nƒ·°ûáв -}œ…@ù endstream endobj 465 0 obj << /Length 143 /Filter /FlateDecode >> stream xÚ32Ö31W0P0b# Sc…C®B.#3 ßÄI$çr9yré‡+™qé{E¹ô=}JŠJS¹ôœ ¹ô]¢*c¹<]ÿ00°ÿg`go`°ã?ÀP#€áüÆÌüƒùIøÿÿpLª^r0û†úÿà˜ËÕ“+ ŠèQK endstream endobj 466 0 obj << /Length 235 /Filter /FlateDecode >> stream xÚuޱJÄ@†ÿba÷ÄÐ$—­.pž` A+ ¹J--í²à‹Mçkø)Sgƒˆ…²ÃÇîüÿü;õÙñœKžñQuʵ՜*z¦:uK>©¾¥û'Z¶TÜšŠKëSÑ^ñëËÛ#Ëës¶÷Šï*.×Ô®@°’.B‡OÉúîCܶÉÅKxÞBôb&'hà¶ÿc²LæiÌ¢µÀõ`_`/p`þÂáB‚×1ANµG®ºK·ÑÔlÄè`iMBˆéÿÄ.©É’&)yc eòÞ6…j4ÍLtÑÒ }PªW¿ endstream endobj 467 0 obj << /Length 152 /Filter /FlateDecode >> stream xÚ31Ò33S0P0bCS#…C®B.c ßÄI$çr9yré‡+[pé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]˜?000üG%ØA3•ù@¢þóǘ?7Ô00g¨c`þ"þ0ØC‰pâM ö õ ÿÿÿÿAp¹zrrî^ endstream endobj 468 0 obj << /Length 116 /Filter /FlateDecode >> stream xÚ32Ô36V0P0bCKSC…C®B.C3 ßÄI$çr9yré‡+šqé{E¹ô=}JŠJS¹ôœ€|…hCƒX.Oöþò ÿ{Àþà?ò?`'òø\®ž\\°„> stream xÚ­Í= Â@à )Óä‚s7?&h%Ħ´²+µT´ÕMð"9BÀÂ!ë¼4jo±ÌÎÌ›¾×‹Bö89 xëÓ‘ÂXj%›=%é%‡1é©ü’Îf|>]v¤“ù˜}Ò)¯|öÖ”¥l—J)ó‹ìb›âË Q VîÉÃ0 ö³^µÐ€ú*T7tGXë¶»Hq‘ç9 ´7€ªA£œJ™Ü˜‡)?Ð$£½Í£kC endstream endobj 470 0 obj << /Length 103 /Filter /FlateDecode >> stream xÚ32Ô36V0P0bCKS#…C®B.C3 ßÄI$çr9yré‡+šqé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]ØüCþì4üþƒ—«'W ÁåFÚ endstream endobj 471 0 obj << /Length 171 /Filter /FlateDecode >> stream xÚ33Q0P0b3ccc…C®B.3rAɹ\Nž\úá f\ú@Q.}O_…’¢ÒT.}§gC.}…hCƒX.Oæòþ30üÿPÿƒùÿ†ÿÔð3…>7ØÉ0~``ÿÎ`oÁøƒùƒ}ˆþÃÔðAÿ`?€L`o„4ûûŒÿþÿ?þÿŸýÿè4—«'W üYz0 endstream endobj 472 0 obj << /Length 142 /Filter /FlateDecode >> stream xÚ31Ò33S0P0bCcc…C®B.c ßÄI$çr9yré‡+[pé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]˜?È`øÿ¡þÃÿþ0üÿÜPÃÀþ¡ŽùˆøÃ`%~À‰4%Ø0Ô7üÿüÿÁåêÉÈãLs endstream endobj 473 0 obj << /Length 178 /Filter /FlateDecode >> stream xÚŽ1 Â@Dg±XøMŽÝÄÅÊ@Œà‚Vb¥–Švr´%GH™"ˆk¶+áóŠæÏÈl”pÄcÆ,S–„O1]IĉËÄ;Ç e–ÌŽEȬœLÆ®ù~{œÉd›ÇdrÞÇÈæ ÝH_]XB5ºÂ R5t¥%Z„ÀÓ# Ìá®p™/¸xZþŸXÿ¯‡ï(Ñ!¨TëË®ý*¿ï³”––¶ô’ô=Ú endstream endobj 474 0 obj << /Length 189 /Filter /FlateDecode >> stream 
xڕαÁPà?9C“³x縭VÙšP‰&ƒ˜0+õf}”>•îи.!a´|Ã’ÿ?Q§ÇâKGZAW"_ÂPÖï8ì¹Ô—¨ÿ>­¶<ÈXÍ%ì±»œU6‘Ãþ¸a5˜%`•Ê"ÉY*¤“VÛ¶* ì 'xw$ Íœ ši4^”Oÿ@æ<ó[P~J]ýk¨F’?YÔ êb@W÷é3øÏ8¬-¾áQÆ3~ÛÅi endstream endobj 475 0 obj << /Length 191 /Filter /FlateDecode >> stream xÚ•Í;‚@àÙPl2 7¹€îâBBG‚˜¸…‰VÆJ--4Zà 8’ÜDŽ@IAÄhŒ¥Í7ɼþ@OBCš ý‚)CÏh"îj ¢÷hÂÄ¢Ú‰P-¸Ê.éz¹Q%«ù¨RÚú¤whSY;q+ZW‚hœd% p+ñ¯„¼2ˆx“ˇ®çùÙ/ÃÓáýä–¢ãpçN-9«õ ãVLñ/YÎÇ]ûç×øÛµEà endstream endobj 476 0 obj << /Length 132 /Filter /FlateDecode >> stream xÚ36Ð30Q0P0b# cc…C®B.#3 ßÄI$çr9yré‡+™qé{E¹ô=}JŠJS¹ôœ ¹ô]¢*c¹<]˜Ô0üøŸáÿãþ†ÿÏå°¿“?ÀüŠÿð70ÿ``€ãÔÁì@³þÿÇ\®ž\\`d@. endstream endobj 477 0 obj << /Length 187 /Filter /FlateDecode >> stream xÚ=Ì= Â@àWE˜Â\@Ì\@7ÙPþ€)­,ÄJ--…tÙ£åžAoÎ!ëøƒÅ×¼7óB¿«ì³æNÀºÏa÷HGúFßfw¤qBjÍ:"5—˜T²àËùz 5^N8 5åMÀþ–’)Ã:hØ<Ôb„mÑDñá|”?5k€¬b›‹®-å_+­kß"69<áš;Å—l¹¨d¿F O“âaïÈ3š%´¢VZ@U endstream endobj 478 0 obj << /Length 142 /Filter /FlateDecode >> stream xÚ3²Ô³0S0P0TÐ5T02S01SH1ä*ä22 (˜˜Cd’s¹œ<¹ôÃŒL¸ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž.  (¨†>€#1``ÿÀ ÿÿß0b‹˜q vÆl@Äð‡¡N†Aþû.WO®@.0Õ>™ endstream endobj 479 0 obj << /Length 144 /Filter /FlateDecode >> stream xÚ31Ò33S0P0RÐ5T01T06VH1ä*ä2¶ (›@d’s¹œ<¹ôÃŒ-¸ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. ÌìþàÿD°`¨gKД¨G%þ30‚‰Œÿ~0ïÿÃPÇnÿ‡Aþ`ÿaÏÀåêÉÈicG endstream endobj 480 0 obj << /Length 178 /Filter /FlateDecode >> stream xÚMÌ» Â@„ᑃ˜ÖBð¼€înV+A#¸… •…‚Z *Úzy3!ïb½¥EHÜ`¼4_ñLK6Ûš%‡ÜP¬»¬C^+Ú“î¸(Yë÷²ÚRߘ±î¹LÂŒùx8mHô'V$"ž+– 2g2û#°è]}‹:<‹JNÞ5”ž¨¾AYbœ±.9=Ü ê·ë‡ØÄ(ð_R‡—|p×ÿÀ~qÇ444¥R<¥ endstream endobj 481 0 obj << /Length 213 /Filter /FlateDecode >> stream xÚ35Õ31S0P0TÐ5T05Q06RH1ä*ä25 (Cd’s¹œ<¹ôÃL¸ô=€Â\úž¾ %E¥©\úNÎ †\ú. ц ±\ž. 
ÿÿËÿoüÿ•bÿÁø‡Á¾ýã9æ ?d@Ô ªa€Sê,€2ÿ àTã†5J5){fþ†ÈÔ %߯~°Ac ¤øÏ0?f€S옿¨ŒPê;b|ÀPA1¹ˆËÕ“+ >\YQ endstream endobj 482 0 obj << /Length 213 /Filter /FlateDecode >> stream xÚUαjAÆñï8ð`±µn^ Ù]ר©Æ€WI•"‚ZT XÅ}´}󛃈qn/¤ùßÀŸéêÛ;Ëš ߘÛ{¶^Z“í˪¹;¨Nów¤^ÙöIe'ULx»ùX’>?²!5â7ÃzJň÷¿n_þ‘•È}Z¢é’„pÂUÅmà­Šèˆ[¹› þ4½¯È¾\ä(¤G?BrŽHú»]3«9|$‘æ.pJi®…U;„Bþð@O½Ðœ>F½ endstream endobj 483 0 obj << /Length 210 /Filter /FlateDecode >> stream xÚ=Ê¿JÃPð2¾å®ŠùžÀ{ÓVh¡¨Ì ÔÉA„Buji¡S›GË£äî輞kÐá7œ?“ÙõTF“©Þ8}+åSÆsfc¶²¬Å>ëx.ö­ØúQûã»ØåúNK±+})Õ½J½RT-<ÒÐÃ4W¸4#–3Ý&òd¾§od_áWúï̼ üÀDP´ƒª!gê„ׄ—”—Œ³á\pº¥Ã#O:äi —5pp#`W„à+ºP¡û#÷µ<É’V- endstream endobj 484 0 obj << /Length 216 /Filter /FlateDecode >> stream xÚe1jÃPDG¨ló/¬½€-É?†¶QH*!•í2`‡\Ù:šŽ¢#¨T!¬ÌÏ'UŠÇÂÌÎ2k³GÍÕêt®¶Ô2×}!'±Å\ËypvŸ²ª$Ûª-${¦,Yõ¢ß_çƒd«×µRÝè;3Rmi Œ¢qDÒºÚ m&0í’Îyê¤wuÜ»&“¤Hzº€áÓ€YÀÕÀˆhß(ßPCðüÞo†fì¡yî\ÉøŸáÎÝ 92»à%L—²¼aç˜ÝýþyªäM~šUÊ endstream endobj 485 0 obj << /Length 182 /Filter /FlateDecode >> stream xÚÎM Â0à†‚…¡Ô­ ¡s“•î„ZÁ,]¹AP—⺶7ðJ½WèMtž”º50¼L†ŒM{6Ü—²#&¼OèBÖJ6ˆh쎔9Ò+¶–ôLnI»9ß®÷él1aÉ9¯6r9{ž*½ m7ŒÁœÀKPÀ<@BÐ)0à *P ªü/ø¨"‚H[P·œâG½ä¿Ä•,‰±š:ZÒ°oS’ endstream endobj 7 0 obj << /Type /Font /Subtype /Type3 /Name /F28 /FontMatrix [0.01338 0 0 0.01338 0 0] /FontBBox [ 1 -17 63 53 ] /Resources << /ProcSet [ /PDF /ImageB ] >> /FirstChar 39 /LastChar 121 /Widths 486 0 R /Encoding 487 0 R /CharProcs 488 0 R >> endobj 486 0 obj [21.33 0 0 0 0 21.33 25.6 21.33 0 0 0 38.4 38.4 38.4 0 0 0 0 0 0 21.33 0 0 0 0 0 0 54.39 0 58.66 0 0 60.26 0 0 0 0 0 0 0 0 0 0 0 0 55.46 0 0 0 0 0 0 0 0 0 0 0 0 38.4 42.66 34.13 42.66 34.16 23.47 38.4 42.66 21.33 0 40.53 21.33 64 42.66 38.4 42.66 40.53 30.04 30.29 29.86 42.66 40.53 55.46 0 40.53 ] endobj 487 0 obj << /Type /Encoding /Differences [39/a39 40/.notdef 44/a44/a45/a46 47/.notdef 50/a50/a51/a52 53/.notdef 59/a59 60/.notdef 66/a66 67/.notdef 68/a68 69/.notdef 71/a71 72/.notdef 84/a84 85/.notdef 97/a97/a98/a99/a100/a101/a102/a103/a104/a105 106/.notdef 107/a107/a108/a109/a110/a111/a112/a113/a114/a115/a116/a117/a118/a119 120/.notdef 121/a121] >> endobj 
488 0 obj << /a39 451 0 R /a44 452 0 R /a45 455 0 R /a46 453 0 R /a50 483 0 R /a51 484 0 R /a52 485 0 R /a59 454 0 R /a66 456 0 R /a68 457 0 R /a71 458 0 R /a84 459 0 R /a97 460 0 R /a98 461 0 R /a99 462 0 R /a100 463 0 R /a101 464 0 R /a102 465 0 R /a103 466 0 R /a104 467 0 R /a105 468 0 R /a107 469 0 R /a108 470 0 R /a109 471 0 R /a110 472 0 R /a111 473 0 R /a112 474 0 R /a113 475 0 R /a114 476 0 R /a115 477 0 R /a116 478 0 R /a117 479 0 R /a118 480 0 R /a119 481 0 R /a121 482 0 R >> endobj 489 0 obj << /Length 235 /Filter /FlateDecode >> stream xÚuбNÃ0€á³ò«O¯8ïÑßShÑ_Ë*úþ†>Þ?_ÐÏo/©C¿ ‡ŽÚGìîÒ8jWÃqy[ixwTËceÿÙMídÃÙuÖ­99›–SÐÌÔ. § /ÈŽû ¬J¨Af Qµ¢Ù\iÜND.”[j4Èé‰ £*A3=¹¸TÍ(÷7¦ ÆòåõŸxÕãþÌÜ/ endstream endobj 490 0 obj << /Length 190 /Filter /FlateDecode >> stream xÚ]Ì1nƒ@ÐA[ M³GðœÀ †"©0‘B)©\X©b—)Å5ÜÌe@Iòó—2Zí“fÿþ©ûÇ+ìÀ[WVUv)õKëÂÒᘂOm{ '« Ï|ÕпØÏ÷íª¡}=Z©¡³siÅ»öÉð+â€(9°ˆŸý*»Åd Á˜˜È0ù™ä‘d#Ù0‰q˜ù ¶F“Cj0'ãLÜH¢Cl×íà°¦E1‡>õú¦èŒgÙ endstream endobj 491 0 obj << /Length 177 /Filter /FlateDecode >> stream xÚ3±Ô34R0P0b3S#…C®B.˜ˆ ’HÎåròäÒW01âÒ÷Šré{ú*”•¦ré;8+ré»(D*Äryº(0þa‚ÿ8H~0ÉNe’ÿˆ¬ÿ$ÿþÿ€ýÿù@²(Áþ¤Œýó?ƒñ?2Ù2…<@‰0JÂlØ qÄ=ߨÝöìÎ`77ËÀåêÉÈI{Ç endstream endobj 492 0 obj << /Length 173 /Filter /FlateDecode >> stream xÚÎ1 Â@Ð/)¦É2'p7l!6b·´²+µ´P´NŽ–£ä)S,wé-æ1Ÿ™âÛõÒ²áiVl-ßsz‘ÙÄ—Û“JGú2éý¤;ðçý}.[ÎIW|ÉÙ\ÉU % PHˆx$ãb„ò‰ Y(º™º ׺…H üM=ÑÍTSôl@Ú¤2¨$ÒøØeˆ­Â§€vŽNôÁ1NB endstream endobj 493 0 obj << /Length 138 /Filter /FlateDecode >> stream xÚ36Ó31W0P0bccc…C®B.cC ßÄI$çr9yré‡+ré{E¹ô=}JŠJS¹ôœ€¢. Ñ@-±\ž. 
Œ?äþÿ¨ÿðÿþ|üçÿïãÿøÿ·ÿcÿÏÁ`Èü‡ýãö ì(Åÿÿ@Á\®ž\\³M' endstream endobj 494 0 obj << /Length 175 /Filter /FlateDecode >> stream xÚ-̱ Â@ à@‡ƒ,÷æ ¼¶çЭP+ØAÐÉAœÔÑAÑÕޣݣœop£BiÌI‡’?ä·‹yUQN6)ÉZ:xÃ2eyZÓátŦC³§²B³–M·¡ÇýyAÓl—T iéPP~Ä®%à·WÌ^‹ÚÕþZ(?üe~œ°àAp€^†žG¨™]úS)L4G€^èA‰,~&ÒFP¥ŸýÌ13|¥'ÔpÕáIKW¼ endstream endobj 495 0 obj << /Length 143 /Filter /FlateDecode >> stream xÚ36Ñ3¶P0P0aKs…C®B.#3 ßÄI$çr9yré‡+™qé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]l0p þÁŒPÌ Äì@,ÿÿÃ$Ì'3À‰ÿ±701ã~†ÿÿìø?0p¹zrrÑËLU endstream endobj 6 0 obj << /Type /Font /Subtype /Type3 /Name /F29 /FontMatrix [0.01338 0 0 0.01338 0 0] /FontBBox [ 2 0 64 52 ] /Resources << /ProcSet [ /PDF /ImageB ] >> /FirstChar 65 /LastChar 116 /Widths 496 0 R /Encoding 497 0 R /CharProcs 498 0 R >> endobj 496 0 obj [66.72 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 42.98 49.12 39.3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 36.47 34.88 34.38 ] endobj 497 0 obj << /Type /Encoding /Differences [65/a65 66/.notdef 97/a97/a98/a99 100/.notdef 114/a114/a115/a116] >> endobj 498 0 obj << /a65 489 0 R /a97 490 0 R /a98 491 0 R /a99 492 0 R /a114 493 0 R /a115 494 0 R /a116 495 0 R >> endobj 499 0 obj << /Length 136 /Filter /FlateDecode >> stream xÚ32×3°P0P°PÐ5´T02P04PH1ä*ä24Š(YB¥’s¹œ<¹ôà ¹ô=€â\úž¾ %E¥©\úNÎ @Q…h ¦X.O9†ú†ÿ ÿᬠ—Àƒ€ ãÆæfv6> † $—«'W ÷ '® endstream endobj 500 0 obj << /Length 230 /Filter /FlateDecode >> stream xڥѽ Â0àá¡÷¦…¶Ø©P+ØAÐÉAœÔÑAѹ}´> stream xÚ½’¿JÄ@Æ¿%`` óÂÍ hþ`Œ×xpž` A«+á@--»|±t¾Æv¶)-­³»ÉWXZýÂd¾owç›*;9-8ã’ >+¹Êø1§*çR̸*üŸ‡gZÖ”®¹œSz-eJë~{}¢ty{É9¥+ÞäœÝS½bÉØ é È˜Þ2Ô „‹–FJŸÑ -_ñ ¾5’ÞJ5fÒ‚FÛvÑh4­PŠ"¡V»‘ƒe¬£‘ÖF T ³ûì·Íß„iÿ—Ó¹{ ÷šî=¾#0¼·÷ôs@7ÑÏIMlý£VMtsŽáç~ŸÃ!|.Gð9Ãçö—#äÛÙ˜­0¶Ì\f¼·Ý Œ{‚qo@W5ÝÑ/X¸’I endstream endobj 502 0 obj << /Length 112 /Filter /FlateDecode >> stream xÚ36Õ34W0P0a#3 …C®B.# ßÄI$çr9yré‡+Ypé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]þÿÿÿ†€ˆ¡þƒý‘A~y) ¸\=¹¹Áƒi° endstream endobj 503 0 obj << /Length 171 /Filter /FlateDecode >> stream xÚåÌ1 Â@Ð [~¡ò/ »1F“JˆL!he!Vj§ ¢uöh%G°L²î‚……7pŠWÌÀÄj RVsÈ£˜Ç BºRäJœϲ?SVÜp”’\Øšd±äûíq$™­f’Ìy²ÚQ‘3ºÆ´_@ x6ÿÂÔQj‹yþÂka´–Dƒ 
D~ü:èVðhˆªt—%¨š´¦7¥Tm endstream endobj 504 0 obj << /Length 258 /Filter /FlateDecode >> stream xÚ}ÒÁJ1à ] {-(tžÀdiµñb¡Vp‚ž<ˆPY¥§R=wÁ[ðEú{ÜÃÒ8Szh»M ß$‡dÈo¯/C2tÉÓéÊÒ{ŠŸ8²\)å _à$CýL#‹úžwQgôýõózòxK)ê)½¤d^1›’sðˆ]ã\)Jö¥vÚ,×¢³ú´æ•hp ¼å½5¢?f|#¨ßC­XQäÓ˜éxÕçFºGJøù=¯bnÄxujQüüÒ+Ø€*üZAÇ€úe7 dÝk)®L@Q= H5eKÀá ˆÿFTµ¥¸¸Ù*q[qœ«àœƒ(ùk ï2|Â]áÍã endstream endobj 505 0 obj << /Length 252 /Filter /FlateDecode >> stream xÚ¥Ò½jÃ0p †[ò¹hd‡`e3$)ÔC ™2”@ íØ!!Gó£è­ ©ï3üU?&Æ ûw†ã0ÿ ó,N=jÂô7˜>ÌFTÒ¿ž¸‘Ux4·ÙF=„E_¹%¸\áµ€=Ü/ɸh endstream endobj 506 0 obj << /Length 229 /Filter /FlateDecode >> stream xÚuϱJAà¹ba ï ¼yÝÙhº…Á+­RˆPK E;1 ¾Øt¾Æ½±»âp½‹ S|Å?;?¬ŸÏxžjösö3¾­éüTCÆÍÍ=-r+öSrg“kÎùéñùŽÜââ„krK¾ªyrMÍ’a{è„Õ®lBŠ-`a:`Ðu)xªu‹w­äG½W‹˜ÕùÇ2©&e˯œɦá¶ÏÚnh›‡Î ÙÍhüuð‡aǨ‡k}ÿ¡ Þ[ bÔªµoŸb»ý"E“z“†O¾€Nº¤oÉŒla endstream endobj 507 0 obj << /Length 210 /Filter /FlateDecode >> stream xÚuÏ1jÃ0àg<þÅ7ˆÿ 4²‘ã1'…z(¤S‡$ MH×XGÓQ|„ŒJÝW\(TˆôúŸ 7uN3uúk‘i1Ó}.Gq%CËáf÷&u#öU])ö‰±ØæYϧƒØzµÐ\ìR×¹fi–Šè €éÆWà‚Op_ÝPIÓ!õ I@Ò*¤#f %×#ý¸~á,üK{ÇT#ç¼³¶,„ΰq`É(°nìYÜsLøâ¾Þ–ÇF^䃷V2 endstream endobj 508 0 obj << /Length 125 /Filter /FlateDecode >> stream xÚ32×3°P0P0b#S3s…C®B.#C ßÄI$çr9yré‡+ré{E¹ô=}JŠJS¹ôœ€¢. Ñ@-±\ž. ŒØ€ÿ‚ˆ¥ˆŒþÃûæ? : æ ÿÿÿ€ .WO®@.»P endstream endobj 509 0 obj << /Length 110 /Filter /FlateDecode >> stream xÚ32×3°P0P0b#S3K…C®B.#C ßÄI$çr9yré‡+ré{E¹ô=}JŠJS¹ôœ€¢. Ñ@-±\ž. ŒþÃûæ? 
ŒC 1ÿcøÿÿq¹zrrp^Ú endstream endobj 510 0 obj << /Length 159 /Filter /FlateDecode >> stream xÚ35Ñ34W0P0bSC…C®B.˜ˆ ’HÎåròäÒW01çÒ÷Šré{ú*”•¦ré;8+ré»(D*Äryº(0þaüÇÀðÿûÿ@RŽý´`üÁÀþ§€ñóŸ ÿ`ø$@äÿ†z É€ ÿa/É òmÃÿÿ?ìÿÿC&¹\=¹¹?qjS endstream endobj 511 0 obj << /Length 209 /Filter /FlateDecode >> stream xÚ= Â@…GR¦É2ÐMtý©bSZYˆ•ZZ(Ú‰ÉÑr2EH|›((vÂðí̛ݷ«Ga_<éIÛ=Ý—½Ï'Ö]ˆžQêÎîÈAÄj-ºËj™U´Ëùz`,§â³ eã‹·å(¢8!"«Ê@'-À1¹à4r²Sjed=L A Ñ‹]l»ÓŒßÄñ V0ùee˜þǯÛ̬äsnãÄ…«òíž ²Áœ¬Ì”/óÍKÝ´í*ëßàYÄ+~PûZ> endstream endobj 512 0 obj << /Length 218 /Filter /FlateDecode >> stream xڭнŽÂ0 p[*yé#à€4"€øè€t7Ýpº ‘Á }4¥Ð±CHpH'n¼[~ƒ­8{`zzÄ9÷¹«Ç<Ðl o5É„jÎÃ~ÛÚìiVúb3"µ’:©bÍçÓeGjö1gMjÁßšó*Œ6±Þf¾'i%°ôQ|”p”Þ´Dй£+”7Y´¦Ñ&˜Dí»èþêï™ñÇÖºÍã^ÙÜ+­džF˰ÅU6ºƒ´uÒˆ“¬;Ò‰wþÛĽoÞ¤eAŸô$”Šš endstream endobj 513 0 obj << /Length 144 /Filter /FlateDecode >> stream xÚ36׳4R0P0a3…C®B.c˜ˆ ’HÎåròäÒW06âÒ÷Šré{ú*”•¦ré;8+ré»(D*Äryº(0ÿ`þðÿ‡üŸÿ?lìþÿ(¨gÿñà?óÏÿ6ügü  u@lÃøŸñþC{Ì ´÷ÿÿpÌåêÉÈÈöPê endstream endobj 514 0 obj << /Length 213 /Filter /FlateDecode >> stream xÚMͱNÃ@б\DÚæÚTdëä""R.HE¨€’’‹ˆøÓü)÷ ‡h®°¼Œ!Åkfg´¾:[œë\½ž–ê—ºXêS)¯âK†såí÷òø"›ZŠ;õ¥׌¥¨oôýíãYŠÍí¥2Ýê=7Roë0ͬ¯&aÖ8äéYZi4 % :šŽú£¬1X[ÀÌz83L̺ܘE†œ[yß!8}†?£øË+–÷ÔðO2dñ»ÍÃWtm8 è\„\Õ²“uYÛ endstream endobj 515 0 obj << /Length 160 /Filter /FlateDecode >> stream xÚ36׳4R0P0RÐ5T06V03TH1ä*ä26PA3#ˆLr.—“'—~¸‚±—¾P˜KßÓW¡¤¨4•Kß)ÀYÁKßE!ÚPÁ –ËÓEó¡a9$lÄuPüˆÙXþÿÿÿ¡$N#ÌC®ca¨gc{ ùù ì00þ?À”àrõä äùJm endstream endobj 516 0 obj << /Length 162 /Filter /FlateDecode >> stream xÚÍË1 Â@…á·¤L¡°˜ èfqCÊ@Œà‚Vb--+'GË‘<@Ⱥ!Xè l¾âý3©™ŒžóÔpjØZ>ºíÇ„m:”êL…#½c›‘^…™´[óíz?‘.6 6¤KÞNäJV- ð-rÿeÜByD¡z 7ÿ«ÿU}Ä`‡(øD,uxIƒé0nÒ·WR héhKo©b“ endstream endobj 517 0 obj << /Length 248 /Filter /FlateDecode >> stream xÚeпJÄ@ðo \`^›B¼yÝÍ] ç ¦´²á@-íÄÛG²´Ì£äR^w¢ùÃÙüŠ™]¾™9ŽŽâ„ Oùpj8>åxƽPS5œÌþZ÷O´LIßpœ¾puÒé%¿½¾?’^^qDzÅ·›;JW\×…ªË¡~ lr¯&V‰÷g¸î¾{„'À´N2¬;säÀ8GÖêÊvn=§·õЪÊQoåb]pл ~‹‹¯^¶ã8ëõí®Ø:úg00ìœ7~Êžî¿®JT¥Ä٠Ͼüœ4s”M^!ÒyJ×ô[ÍX' endstream endobj 518 0 obj << /Length 207 /Filter /FlateDecode >> stream xÚ½½ ÂP 
F¿Ò¡¥Ð¼€ÞVn«“‚?`A'qRGE7Áúf}”>BÇÅšÞ‚Šè*3$|9º×î†ì³æV‡uÈQÄÛ€¤}®+ê5“Íž†1©%kŸÔTڤ⟎ç©á|Ä©1¯öר8Ux·èã”À*à%V7±38©“ÂÎ \Aî&°rOP ådeyÜ¿¡>Xý ?c\%éý#øë£æË'q¶(I£©fÔ‰µNšÄ´ ƒ…) endstream endobj 519 0 obj << /Length 131 /Filter /FlateDecode >> stream xÚ3±Ð37U0P°bC33…C®B.c# ßÄI$çr9yré‡+qé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<] >00013Ëñÿ ÿAø9³ùà óÿúóCýÿÿÿa˜ËÕ“+ Ìt^@ endstream endobj 520 0 obj << /Length 259 /Filter /FlateDecode >> stream xÚ]ÐÁJ…@ÆñOf!"·."ç åÚÍE0p»A.‚Zµˆ ¨vµ ôÑ|Á¥‹ËÎgH0?˜ñ?p´¬NÎNmn¹ÊÒ®×ö¹wYUºÏ¹å‹§7ÙÔâîìªw¥§âêkûùñõ"nssa q[{_ØüAê­…ÙÈB´aD4%;˜>Ú#îp¨§Ýà{%*eÌdl”鈧W”]èHÿ‹ùOË·ž¦…dfä 3Âױt¢KÒ‡óF¼oæû¼³MØfl=³oÂ,"†EÌ"pLΉ~WІh–Fš¥F³*Ö4×€& !Œ3ž´DWþËZnåÎvj endstream endobj 521 0 obj << /Length 262 /Filter /FlateDecode >> stream xÚu½JÅ@…O˜Â}‹;/ I$7¦ \¯` A+ ±RKAE;¹ÙGË£ì#¤L2Î&"þ _±»ÌùæÕÑÉš3.øð˜‹5—%ßçôLEÆá”Õòr÷H›†Òk.2JÏõšÒæ‚__Þ(Ý\žrNé–orÎn©Ù2 ñ€•hÝŒØ!P#îa]âa:ã‘xÛ-ˆûÚ}bh~mhœ!?0…áÿB~! ø?#;CsŨ¨Ð^À¾¨ßJÔ´¼ãIPG^`ÄM !A#`ü‹xøBo’~^°Ö}gt ëtÚ†ºìpêY…zL¨Gûðê%NýCŒí:kèŠ>¹Œƒy endstream endobj 5 0 obj << /Type /Font /Subtype /Type3 /Name /F18 /FontMatrix [0.01004 0 0 0.01004 0 0] /FontBBox [ 2 -20 84 70 ] /Resources << /ProcSet [ /PDF /ImageB ] >> /FirstChar 44 /LastChar 121 /Widths 522 0 R /Encoding 523 0 R /CharProcs 524 0 R >> endobj 522 0 obj [27.08 0 0 0 48.75 48.75 48.75 0 0 0 0 0 0 48.75 0 0 0 0 0 0 0 0 69.05 70.42 0 0 0 0 0 35.17 50.09 0 0 89.34 0 0 0 0 71.76 0 0 0 0 0 0 0 0 0 0 0 0 0 0 48.75 0 0 0 43.33 0 0 0 27.08 0 0 27.08 0 54.17 48.75 54.17 0 37.92 38.46 37.92 54.17 0 0 0 51.46 ] endobj 523 0 obj << /Type /Encoding /Differences [44/a44 45/.notdef 48/a48/a49/a50 51/.notdef 57/a57 58/.notdef 66/a66/a67 68/.notdef 73/a73/a74 75/.notdef 77/a77 78/.notdef 82/a82 83/.notdef 97/a97 98/.notdef 101/a101 102/.notdef 105/a105 106/.notdef 108/a108 109/.notdef 110/a110/a111/a112 113/.notdef 114/a114/a115/a116/a117 118/.notdef 121/a121] >> endobj 524 0 obj << /a44 499 0 R /a48 518 0 R /a49 519 0 R /a50 520 0 R /a57 521 0 R /a66 500 0 R /a67 501 0 R /a73 502 0 R /a74 503 0 R /a77 504 0 R /a82 505 0 R /a97 506 0 R /a101 507 0 
R /a105 508 0 R /a108 509 0 R /a110 510 0 R /a111 511 0 R /a112 512 0 R /a114 513 0 R /a115 514 0 R /a116 515 0 R /a117 516 0 R /a121 517 0 R >> endobj 525 0 obj << /Length 106 /Filter /FlateDecode >> stream xÚ36×31R0P0F¦ fF )†\…\`¾ˆ f%çr9yré‡y\ú@a.}O_…’¢ÒT.}§gC.}…hCƒX.O~ûõþ@Àúöø¨ pÙÁåêÉÈ1V2ê endstream endobj 526 0 obj << /Length 300 /Filter /FlateDecode >> stream xÚµ“±n„0 †ƒNòÂ#'( µ¦H׫T†JíÔ¡êÔëØ¡U;ãñ(<#"/ľʨÛE"|NœÈþ×7W×¥Îu…_¥ëJ ø„²vvŽ&n¼}À¾ìY—5d÷n²æAý¼C¶¼ÕdýRèüšƒ¶§1*¥> stream xÚÕÔ±nƒ0`"¤[xî jÈÐ )M¥2Tj§ U¦´c‡FéLGá,\Û´ç“rJÒn±„õÙHðý.òùÍòs,íe±,ñµ€(vœ»¡»±{‡U jƒåÔƒU?âaÿùjõt‡¨5¾˜o¡^£ñ­l›h»ÀÁOkÛ5£cå&RóeûÌqfÚ_FMOôýÄTã‘83ÄÈ=í‡MK¬:bÖSƘhbÌ8ÝrÇt+ ´/!±#V2{‰ãpŽZbúŽ×Àÿ}ÿ;Ãå”·EÞB¶ÇlçM{º`äâbÕÇÊ“‘Õ/+pVö, ¦ Á¡´Ä!C‰–¢9ÅÔ‡7ñá ‘fA?Ž8üQ÷5<Ã7ã  endstream endobj 528 0 obj << /Length 331 /Filter /FlateDecode >> stream xÚmÓAJÄ0€á”, ÙôJrÛâ …q»tåB\©KŠ®ðb‚ð7–y¾¤“÷f&)¤|I ý)¤®ê“êÌT¦Á1?5óÆ<ÕêU53œWnê6_Ô¢Såifª¼ÂUUv׿ýíãY•‹› S«riîkS=¨niFö¸IP´à(ÜŠëØ®‰ÅJˆcø:`Žï#XûÁQÚ}ºcG¢f›sØ¡ ”ÌÌGRøž‰>"¢ïÙr$j¦ˆè{m dúˆˆ¾gKÓõŽDÍt1]ÑJ¦‹ˆézˆ±gbq$j&F$ˆ=L(™‘ ö0bÏl×Ì‘¨™Å&Åvø(™|'(à‡ùKì™íˆšY$™')™Y’Âñ> stream xÚÅÔ±NÃ0Ы×ÎÒ×@êÊÁ0·Ì=ÌãS,,D¿ uÇê‡Í+u|mÇêŠï‹ÛOö³ëîDîOæE;Y—ðЊÙDê3i S%wÎû3cH˜€ ;›<$érò”¯Ùét ?F7êðºÂ{ü‡ñ,\ endstream endobj 530 0 obj << /Length 190 /Filter /FlateDecode >> stream xÚ³4×36W0P0bK#K …C®B. 
3 ßÄI$çr9yré‡+X˜qé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]ìÿÃÀ"˜ ì`üÁþó ó”)ÿÁþ”Éÿ¡¾ÊdÿQÏe2ÿ¨ƒ1ÔÀ˜ 8˜pfAfœiA&óœióΔ¡œÉg2Œ2G™Ëe^L¦ýÿ0• @ “ËÕ“+ »Ïö endstream endobj 531 0 obj << /Length 287 /Filter /FlateDecode >> stream xڕѽNÃ0à‹> stream xÚåѽJÄ@ðYR¦É˜yM̲pž` A+ ±º³´P´”äÞ,÷&ñ ´ËAȸ³›„ÃÃΰ¿Ý%“ͦ‡GÇ”RFûš¦štšÒRãN2»šÚ¹ö{‹{œå˜\Ó$Ãä\Ö1É/èéñù“Ùå)Ùùœn4¥·˜Ï ܵç0Cþ v þ-¸ôˆ¸ñ0ÜypiV‚ …p-P¯‚¸ØLð"(J€Ëv×W—ÀU+ov®Œ‡-ã“ßúcDâõg˜Uâ7({ð_`üú7'4»¨¿ ÁlÃ…éâm¶sކH/@×b€±'Û¸^U Þ¶b°æÊUŒVlÿA1J·1×vÏÞ€g9^á[9×^ endstream endobj 533 0 obj << /Length 267 /Filter /FlateDecode >> stream xÚ‘±J1†'lq0…ûÞ¼€f̰pžà‚Vb¥–Š‚]òhy”}„-¯86ÎL¢œ‡• Ù/Ìü;“üq«Ó5äè¤%×QwFO-¾¢kHfçræñ×Ú;r Ú+£®éýíãíúæ‚Z´ºo©yÀaCÕ 2–i¤´å¯™5º˜À€z„>‚¬%k<&rš¥,«¶`vŒìd+q3Ëß’1«^+ü ô\úoxE<@ØG*Ðqˆ ÷ù/|AüýoŒÙ¸=˜¨×,¨¢8U(`‡Ø´ fA-©‘pœûžçÚŸ¹Ú¤Pjí"ê{mœ¤ÔIš€‘ƒã倷øYRŽ endstream endobj 534 0 obj << /Length 351 /Filter /FlateDecode >> stream xÚ­‘ÍJÄ0ǧäÈ¥¼€¶‹µ‹§Âº‚=zò ‚ =øu“mÁë£ärì!4ÎLRuD¶„™ÉÌüg¦^îW¦4•Ù;(M}hêÊÜ-Ô£ªKCÿQ•\·jÕªâÒÔ¥*NÑ®Šö̼<½Þ«bu~lªX›«…)¯U»6À_‡GzahBŸ ‚Õï„—ã›t ]æ2 º‡¦G6Da)…Æh˜rûÅÌcf÷EA¿1-Û?pλëÛÕ³«÷³î I}Òˆš6Ä¥£P€gOén Àâܘ’ÝÙ'û+ít‰c¢„036u! è’¡AÒMÄ"9Ñ%ûÈ} |H³=¤X9ÑZ±H v¹÷]Ͻãm³E=L‰QVþgÎq)Ïœ¯ïRþT7éØD]àãn²¤Çó cˆ»Æ’|´M É'bÛ<Î%øªNZu¡>ÚvÔ endstream endobj 535 0 obj << /Length 219 /Filter /FlateDecode >> stream xÚ37ѳ°T0P0bsCC…C®B.33 €˜’JÎåròäÒW03ãÒ÷ sé{ú*”•¦ré;8+ré»(D*Äryº(00`öÿPÆ"Œ0C=Ã~d3ê@Ìÿÿ@üÿÿCö àP³?PÁ ÿÌøÀÀÿÄ8x€ýˆq¸¤Íþ83˜qÈøe0‚w`Œ0H+Èû¸p3Œ2¨ÆÅ>ãÿ òÌÀøþÿÿÿf qƒËÕ“+ ‡ÞP endstream endobj 536 0 obj << /Length 142 /Filter /FlateDecode >> stream xÚ36×31R0P0bcCKS…C®B.#ßÄ1’s¹œ<¹ôÃŒL¹ô=€¢\úž¾ %E¥©\úNÎ †\ú. ц ±\ž.  Œÿ˜ÿ30°ÿoÀŠAr 5 µTì ü@;þ£af f€áú!Žÿ``üÿè¯ÿ ȘËÕ“+ > stream xÚ36×31R0P0bc#C…C®B.#3 €˜’JÎåròäÒW02ãÒ÷ sé{ú*”•¦ré;8+ré»(D*Äryº(0°70ðÿo`ø†™˜†ëG1Õñÿ ŒÿÃúÿdÌåêÉȸ§‰ô endstream endobj 538 0 obj << /Length 207 /Filter /FlateDecode >> stream xÚíÑ¡Â0à[*–œÙ#pO@·@ ¨%0&H@! $¸ñh%Ø#L"Çu€…D´ùþ¶—KzzµÙ¢ê²™Í"\¢1’CÝÅtíõˆŒAÝ“SÔiŸÖ«Íu{СuBãˆÂ ¦ ²åà³U|0Û€ù‰Ø–ØB%/Q@Px¼·à_åQvØïʲ#€rˆO‚û ^‰Ëç7\©ëŸ‘†ýãgpÓ÷x'A~^ɼ™¹P²Ù/ÀnŠC|U¸ý endstream endobj 539 0 obj << /Length 185 /Filter /FlateDecode >> stream xÚÝÏ? 
ÂP ð¯,d°«ƒÐœÀ×ÚVt*øì èä ‚ Ž‚ŠÎ¯GëQzÇNÆ÷:ˆƒx‡üÈ—@ i¿—Drj*ñ æCDJb“Cíb¢qNjÍILjn¦¤òß®÷#©ñr©)oÌ™-åS†¯†/ž–ÂX¥ˆSeF·Ô•+^¡+ˆkÛª»d%ôA¢è3ðv×X}Xþ´øÅ~äÈö"õ7i–ÓŠ^¤Ds. endstream endobj 540 0 obj << /Length 281 /Filter /FlateDecode >> stream xÚuÐ1NÄ0Ð¥ˆäÆGð\’o$"-‹D $¨(PR€ [mr®â›#¸Lv˜q v š'Ù3þ3Éêì´n¨"O'5ùsj<=׿Íx/—5«¥òôjÖ)ïÉ{S^˵)»úxÿ|1åúö’jSn衦êÑt8ä€å©zÞ[dŒö yDñbDΰƒtÁ‰=Z¨b‹è°M΢ýÇûyqPû¡©“Újë•e^Œ5X*³>ìYëŽYžÌ:#•õB´IjÆ!¥MlGÕ-ƨéÉâH]$?r>Pçäcš6òŸA§Ù ÓìÖ~¢þ¥I"v˜¶ÈfD7¸ˆ(Ÿ0æºl@/]æª3wæׄŒœ endstream endobj 541 0 obj << /Length 191 /Filter /FlateDecode >> stream xÚ35Ò31T0P0RÐ5T01U°°PH1ä*ä21 (XXBd’s¹œ<¹ôÃLŒ¸ô=€Â\úž¾ %E¥©\úNÎ †\ú. Ñ@ƒb¹<] @€ò>’ƒdF"Ù‘H~$RLÚƒÉz0ùD2ƒIþÿ@ÀðƒD1aˆ’Œ¨L²ÿ``n@'Ù˜ÿ0°3€H~`¼ücà1ƒ(¸l@Aÿà(ÀáÍþÿ8¸\=¹¹~@‡Ø endstream endobj 542 0 obj << /Length 268 /Filter /FlateDecode >> stream xÚ}Ð1K1ÀñWn(¼Áûž/ ¹T‰„ƒZÁÄI…* nwâËÖ¯qŸ@2ÞP.¾äR0‘:¼ðK2äONä¡<¦‚ft I’šÑ£ÄTŠ RGÃÍÃ3.*·¤ŠK>FQ]ÑÛëûŠÅõ9IKº“TÜcµ$km™µúŒlvÃÓ2JP;L5o<š-ÜDØw0¹ÃÄ¡ ;Ì#ð3ðÁ“9¬~cÔóÒF°<à cp¼GÍh> /FirstChar 58 /LastChar 118 /Widths 543 0 R /Encoding 544 0 R /CharProcs 545 0 R >> endobj 543 0 obj [37.42 0 0 0 0 0 0 0 95.47 0 102.96 0 0 0 0 0 0 0 0 0 101.06 0 0 0 99.22 0 97.37 0 0 0 0 0 0 0 0 0 0 0 0 67.4 0 0 74.89 59.9 0 67.4 74.89 37.42 0 0 37.42 0 74.89 0 0 0 52.41 53.16 52.41 0 71.14 ] endobj 544 0 obj << /Type /Encoding /Differences [58/a58 59/.notdef 66/a66 67/.notdef 68/a68 69/.notdef 78/a78 79/.notdef 82/a82 83/.notdef 84/a84 85/.notdef 97/a97 98/.notdef 100/a100/a101 102/.notdef 103/a103/a104/a105 106/.notdef 108/a108 109/.notdef 110/a110 111/.notdef 114/a114/a115/a116 117/.notdef 118/a118] >> endobj 545 0 obj << /a58 525 0 R /a66 526 0 R /a68 527 0 R /a78 528 0 R /a82 529 0 R /a84 530 0 R /a97 531 0 R /a100 532 0 R /a101 533 0 R /a103 534 0 R /a104 535 0 R /a105 536 0 R /a108 537 0 R /a110 538 0 R /a114 539 0 R /a115 540 0 R /a116 541 0 R /a118 542 0 R >> endobj 10 0 obj << /Type /Pages /Count 6 /Parent 546 0 R /Kids [2 0 R 12 0 R 18 0 R 21 0 R 24 0 R 27 0 R] >> endobj 35 0 obj << /Type /Pages /Count 6 /Parent 546 0 R /Kids [33 0 
R 37 0 R 40 0 R 43 0 R 46 0 R 49 0 R] >> endobj 54 0 obj << /Type /Pages /Count 6 /Parent 546 0 R /Kids [52 0 R 56 0 R 59 0 R 62 0 R 65 0 R 68 0 R] >> endobj 73 0 obj << /Type /Pages /Count 4 /Parent 546 0 R /Kids [71 0 R 75 0 R 78 0 R 81 0 R] >> endobj 546 0 obj << /Type /Pages /Count 22 /Kids [10 0 R 35 0 R 54 0 R 73 0 R] >> endobj 547 0 obj << /Type /Catalog /Pages 546 0 R >> endobj 548 0 obj << /Producer (pdfTeX-1.40.10) /Creator (TeX) /CreationDate (D:20120619120432+09'30') /ModDate (D:20120619120432+09'30') /Trapped /False /PTEX.Fullbanner (This is pdfTeX, Version 3.1415926-1.40.10-2.2 (TeX Live 2009/Debian) kpathsea version 5.0.0) >> endobj xref 0 549 0000000000 65535 f 0000002369 00000 n 0000002264 00000 n 0000000015 00000 n 0000178925 00000 n 0000171929 00000 n 0000164935 00000 n 0000161714 00000 n 0000151852 00000 n 0000142341 00000 n 0000179974 00000 n 0000004013 00000 n 0000003905 00000 n 0000002492 00000 n 0000118062 00000 n 0000098125 00000 n 0000085597 00000 n 0000006049 00000 n 0000005941 00000 n 0000004129 00000 n 0000008159 00000 n 0000008051 00000 n 0000006142 00000 n 0000010410 00000 n 0000010302 00000 n 0000008252 00000 n 0000012891 00000 n 0000012783 00000 n 0000010503 00000 n 0000070209 00000 n 0000067949 00000 n 0000064170 00000 n 0000015137 00000 n 0000015029 00000 n 0000013020 00000 n 0000180083 00000 n 0000017670 00000 n 0000017562 00000 n 0000015230 00000 n 0000019835 00000 n 0000019727 00000 n 0000017799 00000 n 0000021928 00000 n 0000021820 00000 n 0000019928 00000 n 0000024284 00000 n 0000024176 00000 n 0000022021 00000 n 0000026332 00000 n 0000026224 00000 n 0000024424 00000 n 0000028997 00000 n 0000028889 00000 n 0000026425 00000 n 0000180193 00000 n 0000031148 00000 n 0000031040 00000 n 0000029126 00000 n 0000033694 00000 n 0000033586 00000 n 0000031229 00000 n 0000036326 00000 n 0000036218 00000 n 0000033775 00000 n 0000038837 00000 n 0000038729 00000 n 0000036467 00000 n 0000040155 00000 n 0000040047 00000 n 0000038966 00000 n 
0000042166 00000 n 0000042058 00000 n 0000040260 00000 n 0000180303 00000 n 0000044511 00000 n 0000044403 00000 n 0000042259 00000 n 0000046766 00000 n 0000046658 00000 n 0000044640 00000 n 0000048102 00000 n 0000047994 00000 n 0000046859 00000 n 0000048195 00000 n 0000048439 00000 n 0000048683 00000 n 0000048861 00000 n 0000049065 00000 n 0000049235 00000 n 0000049475 00000 n 0000049653 00000 n 0000049834 00000 n 0000050008 00000 n 0000050236 00000 n 0000050581 00000 n 0000050823 00000 n 0000051105 00000 n 0000051370 00000 n 0000051673 00000 n 0000051928 00000 n 0000052184 00000 n 0000052423 00000 n 0000052723 00000 n 0000052907 00000 n 0000053125 00000 n 0000053427 00000 n 0000053641 00000 n 0000053939 00000 n 0000054238 00000 n 0000054526 00000 n 0000054766 00000 n 0000055041 00000 n 0000055334 00000 n 0000055556 00000 n 0000055858 00000 n 0000056213 00000 n 0000056471 00000 n 0000056734 00000 n 0000056982 00000 n 0000057243 00000 n 0000057498 00000 n 0000057710 00000 n 0000057990 00000 n 0000058217 00000 n 0000058416 00000 n 0000058683 00000 n 0000058869 00000 n 0000059114 00000 n 0000059332 00000 n 0000059570 00000 n 0000059837 00000 n 0000060104 00000 n 0000060309 00000 n 0000060559 00000 n 0000060778 00000 n 0000060998 00000 n 0000061246 00000 n 0000061522 00000 n 0000061792 00000 n 0000062077 00000 n 0000062334 00000 n 0000062578 00000 n 0000062775 00000 n 0000063057 00000 n 0000063307 00000 n 0000063581 00000 n 0000063879 00000 n 0000064419 00000 n 0000064882 00000 n 0000065415 00000 n 0000066275 00000 n 0000066470 00000 n 0000066722 00000 n 0000066973 00000 n 0000067213 00000 n 0000067460 00000 n 0000067712 00000 n 0000068196 00000 n 0000068258 00000 n 0000068343 00000 n 0000068457 00000 n 0000068654 00000 n 0000068919 00000 n 0000069187 00000 n 0000069430 00000 n 0000069691 00000 n 0000069963 00000 n 0000070456 00000 n 0000070518 00000 n 0000070603 00000 n 0000070717 00000 n 0000070990 00000 n 0000071258 00000 n 0000071453 00000 n 0000071650 00000 n 
0000071906 00000 n 0000072158 00000 n 0000072343 00000 n 0000072595 00000 n 0000072812 00000 n 0000072992 00000 n 0000073230 00000 n 0000073418 00000 n 0000073654 00000 n 0000073845 00000 n 0000074110 00000 n 0000074381 00000 n 0000074645 00000 n 0000074873 00000 n 0000075181 00000 n 0000075382 00000 n 0000075571 00000 n 0000075775 00000 n 0000076026 00000 n 0000076304 00000 n 0000076621 00000 n 0000076826 00000 n 0000077074 00000 n 0000077346 00000 n 0000077614 00000 n 0000077877 00000 n 0000078151 00000 n 0000078433 00000 n 0000078668 00000 n 0000079002 00000 n 0000079244 00000 n 0000079457 00000 n 0000079736 00000 n 0000079932 00000 n 0000080184 00000 n 0000080420 00000 n 0000080684 00000 n 0000080965 00000 n 0000081204 00000 n 0000081470 00000 n 0000081706 00000 n 0000081935 00000 n 0000082202 00000 n 0000082457 00000 n 0000082740 00000 n 0000083058 00000 n 0000083315 00000 n 0000083599 00000 n 0000083821 00000 n 0000084124 00000 n 0000084431 00000 n 0000084693 00000 n 0000084981 00000 n 0000085303 00000 n 0000085847 00000 n 0000086275 00000 n 0000086849 00000 n 0000087662 00000 n 0000087846 00000 n 0000088029 00000 n 0000088366 00000 n 0000088655 00000 n 0000088999 00000 n 0000089276 00000 n 0000089541 00000 n 0000089883 00000 n 0000090213 00000 n 0000090459 00000 n 0000090773 00000 n 0000091117 00000 n 0000091350 00000 n 0000091728 00000 n 0000092008 00000 n 0000092273 00000 n 0000092550 00000 n 0000092822 00000 n 0000093025 00000 n 0000093211 00000 n 0000093480 00000 n 0000093712 00000 n 0000093969 00000 n 0000094242 00000 n 0000094467 00000 n 0000094735 00000 n 0000094966 00000 n 0000095196 00000 n 0000095465 00000 n 0000095668 00000 n 0000095980 00000 n 0000096298 00000 n 0000096567 00000 n 0000096877 00000 n 0000097193 00000 n 0000097474 00000 n 0000097807 00000 n 0000098374 00000 n 0000098688 00000 n 0000099069 00000 n 0000099598 00000 n 0000099785 00000 n 0000100033 00000 n 0000100315 00000 n 0000100545 00000 n 0000100731 00000 n 0000100985 00000 n 
0000101162 00000 n 0000101430 00000 n 0000101743 00000 n 0000102115 00000 n 0000102431 00000 n 0000102818 00000 n 0000103128 00000 n 0000103417 00000 n 0000103671 00000 n 0000104052 00000 n 0000104254 00000 n 0000104439 00000 n 0000104669 00000 n 0000105028 00000 n 0000105386 00000 n 0000105753 00000 n 0000106019 00000 n 0000106351 00000 n 0000106736 00000 n 0000106979 00000 n 0000107268 00000 n 0000107651 00000 n 0000108054 00000 n 0000108367 00000 n 0000108677 00000 n 0000108972 00000 n 0000109274 00000 n 0000109581 00000 n 0000109829 00000 n 0000110191 00000 n 0000110439 00000 n 0000110651 00000 n 0000110952 00000 n 0000111140 00000 n 0000111430 00000 n 0000111673 00000 n 0000111967 00000 n 0000112275 00000 n 0000112593 00000 n 0000112835 00000 n 0000113139 00000 n 0000113393 00000 n 0000113640 00000 n 0000113937 00000 n 0000114252 00000 n 0000114600 00000 n 0000114889 00000 n 0000115181 00000 n 0000115388 00000 n 0000115735 00000 n 0000116074 00000 n 0000116371 00000 n 0000116705 00000 n 0000117050 00000 n 0000117345 00000 n 0000117716 00000 n 0000118312 00000 n 0000118775 00000 n 0000119277 00000 n 0000120140 00000 n 0000120410 00000 n 0000120679 00000 n 0000120925 00000 n 0000121167 00000 n 0000121351 00000 n 0000121541 00000 n 0000121752 00000 n 0000121997 00000 n 0000122209 00000 n 0000122384 00000 n 0000122618 00000 n 0000122800 00000 n 0000123021 00000 n 0000123209 00000 n 0000123452 00000 n 0000123627 00000 n 0000123873 00000 n 0000124305 00000 n 0000124558 00000 n 0000124814 00000 n 0000125048 00000 n 0000125337 00000 n 0000125653 00000 n 0000125943 00000 n 0000126284 00000 n 0000126559 00000 n 0000126821 00000 n 0000127068 00000 n 0000127403 00000 n 0000127609 00000 n 0000127796 00000 n 0000128042 00000 n 0000128366 00000 n 0000128587 00000 n 0000128912 00000 n 0000129236 00000 n 0000129556 00000 n 0000129804 00000 n 0000130106 00000 n 0000130443 00000 n 0000130674 00000 n 0000130946 00000 n 0000131267 00000 n 0000131655 00000 n 0000132015 00000 n 
0000132327 00000 n 0000132640 00000 n 0000132925 00000 n 0000133204 00000 n 0000133467 00000 n 0000133746 00000 n 0000134016 00000 n 0000134238 00000 n 0000134556 00000 n 0000134792 00000 n 0000134995 00000 n 0000135226 00000 n 0000135503 00000 n 0000135692 00000 n 0000135950 00000 n 0000136178 00000 n 0000136447 00000 n 0000136724 00000 n 0000137007 00000 n 0000137228 00000 n 0000137504 00000 n 0000137736 00000 n 0000137970 00000 n 0000138234 00000 n 0000138548 00000 n 0000138839 00000 n 0000139139 00000 n 0000139403 00000 n 0000139672 00000 n 0000139874 00000 n 0000140183 00000 n 0000140499 00000 n 0000140768 00000 n 0000141075 00000 n 0000141399 00000 n 0000141677 00000 n 0000142023 00000 n 0000142590 00000 n 0000143151 00000 n 0000143689 00000 n 0000144814 00000 n 0000145222 00000 n 0000145408 00000 n 0000145698 00000 n 0000146125 00000 n 0000146476 00000 n 0000146814 00000 n 0000147138 00000 n 0000147472 00000 n 0000147797 00000 n 0000148053 00000 n 0000148272 00000 n 0000148460 00000 n 0000148773 00000 n 0000149038 00000 n 0000149350 00000 n 0000149600 00000 n 0000149940 00000 n 0000150207 00000 n 0000150475 00000 n 0000150866 00000 n 0000151084 00000 n 0000151466 00000 n 0000152101 00000 n 0000152359 00000 n 0000152655 00000 n 0000152990 00000 n 0000153197 00000 n 0000153406 00000 n 0000153577 00000 n 0000153795 00000 n 0000153970 00000 n 0000154246 00000 n 0000154517 00000 n 0000154840 00000 n 0000155066 00000 n 0000155335 00000 n 0000155602 00000 n 0000155857 00000 n 0000156125 00000 n 0000156386 00000 n 0000156610 00000 n 0000156926 00000 n 0000157159 00000 n 0000157356 00000 n 0000157627 00000 n 0000157811 00000 n 0000158063 00000 n 0000158286 00000 n 0000158545 00000 n 0000158815 00000 n 0000159087 00000 n 0000159300 00000 n 0000159568 00000 n 0000159791 00000 n 0000160016 00000 n 0000160275 00000 n 0000160569 00000 n 0000160863 00000 n 0000161154 00000 n 0000161451 00000 n 0000161962 00000 n 0000162278 00000 n 0000162639 00000 n 0000163137 00000 n 
0000163453 00000 n 0000163724 00000 n 0000163982 00000 n 0000164236 00000 n 0000164455 00000 n 0000164711 00000 n 0000165181 00000 n 0000165332 00000 n 0000165450 00000 n 0000165567 00000 n 0000165784 00000 n 0000166095 00000 n 0000166470 00000 n 0000166663 00000 n 0000166915 00000 n 0000167254 00000 n 0000167587 00000 n 0000167897 00000 n 0000168188 00000 n 0000168394 00000 n 0000168585 00000 n 0000168825 00000 n 0000169115 00000 n 0000169414 00000 n 0000169639 00000 n 0000169933 00000 n 0000170174 00000 n 0000170417 00000 n 0000170746 00000 n 0000171034 00000 n 0000171246 00000 n 0000171586 00000 n 0000172177 00000 n 0000172445 00000 n 0000172798 00000 n 0000173131 00000 n 0000173318 00000 n 0000173699 00000 n 0000174065 00000 n 0000174477 00000 n 0000174871 00000 n 0000175142 00000 n 0000175510 00000 n 0000175881 00000 n 0000176229 00000 n 0000176661 00000 n 0000176961 00000 n 0000177184 00000 n 0000177388 00000 n 0000177676 00000 n 0000177942 00000 n 0000178304 00000 n 0000178576 00000 n 0000179174 00000 n 0000179387 00000 n 0000179706 00000 n 0000180399 00000 n 0000180481 00000 n 0000180534 00000 n trailer << /Size 549 /Root 547 0 R /Info 548 0 R /ID [ ] >> startxref 180801 %%EOF ntdb-1.0/doc/design.txt000066400000000000000000001260671224151530700150740ustar00rootroot00000000000000NTDB: Redesigning The Trivial DataBase Rusty Russell, IBM Corporation 19 June 2012 Abstract The Trivial DataBase on-disk format is 32 bits; with usage cases heading towards the 4G limit, that must change. This required breakage provides an opportunity to revisit TDB's other design decisions and reassess them. 1 Introduction The Trivial DataBase was originally written by Andrew Tridgell as a simple key/data pair storage system with the same API as dbm, but allowing multiple readers and writers while being small enough (< 1000 lines of C) to include in SAMBA. 
The simple design created in 1999 has proven surprisingly robust and performant, used in Samba versions 3 and 4 as well as numerous other projects. Its useful life was greatly increased by the (backwards-compatible!) addition of transaction support in 2005. The wider variety and greater demands of TDB-using code has lead to some organic growth of the API, as well as some compromises on the implementation. None of these, by themselves, are seen as show-stoppers, but the cumulative effect is to a loss of elegance over the initial, simple TDB implementation. Here is a table of the approximate number of lines of implementation code and number of API functions at the end of each year: +-----------+----------------+--------------------------------+ | Year End | API Functions | Lines of C Code Implementation | +-----------+----------------+--------------------------------+ +-----------+----------------+--------------------------------+ | 1999 | 13 | 1195 | +-----------+----------------+--------------------------------+ | 2000 | 24 | 1725 | +-----------+----------------+--------------------------------+ | 2001 | 32 | 2228 | +-----------+----------------+--------------------------------+ | 2002 | 35 | 2481 | +-----------+----------------+--------------------------------+ | 2003 | 35 | 2552 | +-----------+----------------+--------------------------------+ | 2004 | 40 | 2584 | +-----------+----------------+--------------------------------+ | 2005 | 38 | 2647 | +-----------+----------------+--------------------------------+ | 2006 | 52 | 3754 | +-----------+----------------+--------------------------------+ | 2007 | 66 | 4398 | +-----------+----------------+--------------------------------+ | 2008 | 71 | 4768 | +-----------+----------------+--------------------------------+ | 2009 | 73 | 5715 | +-----------+----------------+--------------------------------+ This review is an attempt to catalog and address all the known issues with TDB and create solutions which address the 
problems without significantly increasing complexity; all involved are far too aware of the dangers of second system syndrome in rewriting a successful project like this. Note: the final decision was to make ntdb a separate library, with a separarate 'ntdb' namespace so both can potentially be linked together. This document still refers to “tdb†everywhere, for simplicity. 2 API Issues 2.1 tdb_open_ex Is Not Expandable The tdb_open() call was expanded to tdb_open_ex(), which added an optional hashing function and an optional logging function argument. Additional arguments to open would require the introduction of a tdb_open_ex2 call etc. 2.1.1 Proposed Solution tdb_open() will take a linked-list of attributes: enum tdb_attribute { TDB_ATTRIBUTE_LOG = 0, TDB_ATTRIBUTE_HASH = 1 }; struct tdb_attribute_base { enum tdb_attribute attr; union tdb_attribute *next; }; struct tdb_attribute_log { struct tdb_attribute_base base; /* .attr = TDB_ATTRIBUTE_LOG */ tdb_log_func log_fn; void *log_private; }; struct tdb_attribute_hash { struct tdb_attribute_base base; /* .attr = TDB_ATTRIBUTE_HASH */ tdb_hash_func hash_fn; void *hash_private; }; union tdb_attribute { struct tdb_attribute_base base; struct tdb_attribute_log log; struct tdb_attribute_hash hash; }; This allows future attributes to be added, even if this expands the size of the union. 2.1.2 Status Complete. 2.2 tdb_traverse Makes Impossible Guarantees tdb_traverse (and tdb_firstkey/tdb_nextkey) predate transactions, and it was thought that it was important to guarantee that all records which exist at the start and end of the traversal would be included, and no record would be included twice. This adds complexity (see[Reliable-Traversal-Adds]) and does not work anyway for records which are altered (in particular, those which are expanded may be effectively deleted and re-added behind the traversal). 2.2.1 Proposed Solution Abandon the guarantee. 
You will see every record if no changes occur during your traversal, otherwise you will see some subset. You can prevent changes by using a transaction or the locking API. 2.2.2 Status Complete. Delete-during-traverse will still delete every record, too (assuming no other changes). 2.3 Nesting of Transactions Is Fraught TDB has alternated between allowing nested transactions and not allowing them. Various paths in the Samba codebase assume that transactions will nest, and in a sense they can: the operation is only committed to disk when the outer transaction is committed. There are two problems, however: 1. Canceling the inner transaction will cause the outer transaction commit to fail, and will not undo any operations since the inner transaction began. This problem is soluble with some additional internal code. 2. An inner transaction commit can be cancelled by the outer transaction. This is desirable in the way which Samba's database initialization code uses transactions, but could be a surprise to any users expecting a successful transaction commit to expose changes to others. The current solution is to specify the behavior at tdb_open(), with the default currently that nested transactions are allowed. This flag can also be changed at runtime. 2.3.1 Proposed Solution Given the usage patterns, it seems that the“least-surprise†behavior of disallowing nested transactions should become the default. Additionally, it seems the outer transaction is the only code which knows whether inner transactions should be allowed, so a flag to indicate this could be added to tdb_transaction_start. However, this behavior can be simulated with a wrapper which uses tdb_add_flags() and tdb_remove_flags(), so the API should not be expanded for this relatively-obscure case. 2.3.2 Status Complete; the nesting flag has been removed. 
2.4 Incorrect Hash Function is Not Detected tdb_open_ex() allows the calling code to specify a different hash function to use, but does not check that all other processes accessing this tdb are using the same hash function. The result is that records are missing from tdb_fetch(). 2.4.1 Proposed Solution The header should contain an example hash result (eg. the hash of 0xdeadbeef), and tdb_open_ex() should check that the given hash function produces the same answer, or fail the tdb_open call. 2.4.2 Status Complete. 2.5 tdb_set_max_dead/TDB_VOLATILE Expose Implementation In response to scalability issues with the free list ([TDB-Freelist-Is] ) two API workarounds have been incorporated in TDB: tdb_set_max_dead() and the TDB_VOLATILE flag to tdb_open. The latter actually calls the former with an argument of“5â€. This code allows deleted records to accumulate without putting them in the free list. On delete we iterate through each chain and free them in a batch if there are more than max_dead entries. These are never otherwise recycled except as a side-effect of a tdb_repack. 2.5.1 Proposed Solution With the scalability problems of the freelist solved, this API can be removed. The TDB_VOLATILE flag may still be useful as a hint that store and delete of records will be at least as common as fetch in order to allow some internal tuning, but initially will become a no-op. 2.5.2 Status Complete. Unknown flags cause tdb_open() to fail as well, so they can be detected at runtime. 2.6 TDB Files Cannot Be Opened Multiple Times In The Same Process No process can open the same TDB twice; we check and disallow it. This is an unfortunate side-effect of fcntl locks, which operate on a per-file rather than per-file-descriptor basis, and do not nest. Thus, closing any file descriptor on a file clears all the locks obtained by this process, even if they were placed using a different file descriptor! 
Note that even if this were solved, deadlock could occur if operations were nested: this is a more manageable programming error in most cases. 2.6.1 Proposed Solution We could lobby POSIX to fix the perverse rules, or at least lobby Linux to violate them so that the most common implementation does not have this restriction. This would be a generally good idea for other fcntl lock users. Samba uses a wrapper which hands out the same tdb_context to multiple callers if this happens, and does simple reference counting. We should do this inside the tdb library, which already emulates lock nesting internally; it would need to recognize when deadlock occurs within a single process. This would create a new failure mode for tdb operations (while we currently handle locking failures, they are impossible in normal use and a process encountering them can do little but give up). I do not see benefit in an additional tdb_open flag to indicate whether re-opening is allowed, as though there may be some benefit to adding a call to detect when a tdb_context is shared, to allow other to create such an API. 2.6.2 Status Complete. 2.7 TDB API Is Not POSIX Thread-safe The TDB API uses an error code which can be queried after an operation to determine what went wrong. This programming model does not work with threads, unless specific additional guarantees are given by the implementation. In addition, even otherwise-independent threads cannot open the same TDB (as in[TDB-Files-Cannot] ). 2.7.1 Proposed Solution Reachitecting the API to include a tdb_errcode pointer would be a great deal of churn, but fortunately most functions return 0 on success and -1 on error: we can change these to return 0 on success and a negative error code on error, and the API remains similar to previous. The tdb_fetch, tdb_firstkey and tdb_nextkey functions need to take a TDB_DATA pointer and return an error code. It is also simpler to have tdb_nextkey replace its key argument in place, freeing up any old .dptr. 
Internal locking is required to make sure that fcntl locks do not overlap between threads, and also that the global list of tdbs is maintained. The aim is that building tdb with -DTDB_PTHREAD will result in a pthread-safe version of the library, and otherwise no overhead will exist. Alternatively, a hooking mechanism similar to that proposed for[Proposed-Solution-locking-hook] could be used to enable pthread locking at runtime. 2.7.2 Status Incomplete; API has been changed but thread safety has not been implemented. 2.8 *_nonblock Functions And *_mark Functions Expose Implementation CTDB[footnote: Clustered TDB, see http://ctdb.samba.org ] wishes to operate on TDB in a non-blocking manner. This is currently done as follows: 1. Call the _nonblock variant of an API function (eg. tdb_lockall_nonblock). If this fails: 2. Fork a child process, and wait for it to call the normal variant (eg. tdb_lockall). 3. If the child succeeds, call the _mark variant to indicate we already have the locks (eg. tdb_lockall_mark). 4. Upon completion, tell the child to release the locks (eg. tdb_unlockall). 5. Indicate to tdb that it should consider the locks removed (eg. tdb_unlockall_mark). There are several issues with this approach. Firstly, adding two new variants of each function clutters the API for an obscure use, and so not all functions have three variants. Secondly, it assumes that all paths of the functions ask for the same locks, otherwise the parent process will have to get a lock which the child doesn't have under some circumstances. I don't believe this is currently the case, but it constrains the implementation. 2.8.1 Proposed Solution Implement a hook for locking methods, so that the caller can control the calls to create and remove fcntl locks. In this scenario, ctdbd would operate as follows: 1. Call the normal API function, eg tdb_lockall(). 2. When the lock callback comes in, check if the child has the lock. Initially, this is always false. If so, return 0. 
Otherwise, try to obtain it in non-blocking mode. If that fails, return EWOULDBLOCK. 3. Release locks in the unlock callback as normal. 4. If tdb_lockall() fails, see if we recorded a lock failure; if so, call the child to repeat the operation. 5. The child records what locks it obtains, and returns that information to the parent. 6. When the child has succeeded, goto 1. This is flexible enough to handle any potential locking scenario, even when lock requirements change. It can be optimized so that the parent does not release locks, just tells the child which locks it doesn't need to obtain. It also keeps the complexity out of the API, and in ctdbd where it is needed. 2.8.2 Status Complete. 2.9 tdb_chainlock Functions Expose Implementation tdb_chainlock locks some number of records, including the record indicated by the given key. This gave atomicity guarantees; no-one can start a transaction, alter, read or delete that key while the lock is held. It also makes the same guarantee for any other key in the chain, which is an internal implementation detail and potentially a cause for deadlock. 2.9.1 Proposed Solution None. It would be nice to have an explicit single entry lock which effected no other keys. Unfortunately, this won't work for an entry which doesn't exist. Thus while chainlock may be implemented more efficiently for the existing case, it will still have overlap issues with the non-existing case. So it is best to keep the current (lack of) guarantee about which records will be effected to avoid constraining our implementation. 2.10 Signal Handling is Not Race-Free The tdb_setalarm_sigptr() call allows the caller's signal handler to indicate that the tdb locking code should return with a failure, rather than trying again when a signal is received (and errno == EAGAIN). This is usually used to implement timeouts. 
Unfortunately, this does not work in the case where the signal is received before the tdb code enters the fcntl() call to place the lock: the code will sleep within the fcntl() code, unaware that the signal wants it to exit. In the case of long timeouts, this does not happen in practice. 2.10.1 Proposed Solution The locking hooks proposed in[Proposed-Solution-locking-hook] would allow the user to decide on whether to fail the lock acquisition on a signal. This allows the caller to choose their own compromise: they could narrow the race by checking immediately before the fcntl call.[footnote: It may be possible to make this race-free in some implementations by having the signal handler alter the struct flock to make it invalid. This will cause the fcntl() lock call to fail with EINVAL if the signal occurs before the kernel is entered, otherwise EAGAIN. ] 2.10.2 Status Complete. 2.11 The API Uses Gratuitous Typedefs, Capitals typedefs are useful for providing source compatibility when types can differ across implementations, or arguably in the case of function pointer definitions which are hard for humans to parse. Otherwise it is simply obfuscation and pollutes the namespace. Capitalization is usually reserved for compile-time constants and macros. TDB_CONTEXT There is no reason to use this over 'struct tdb_context'; the definition isn't visible to the API user anyway. TDB_DATA There is no reason to use this over struct TDB_DATA; the struct needs to be understood by the API user. struct TDB_DATA This would normally be called 'struct tdb_data'. enum TDB_ERROR Similarly, this would normally be enum tdb_error. 2.11.1 Proposed Solution None. Introducing lower case variants would please pedants like myself, but if it were done the existing ones should be kept. There is little point forcing a purely cosmetic change upon tdb users. 
2.12 tdb_log_func Doesn't Take The Private Pointer For API compatibility reasons, the logging function needs to call tdb_get_logging_private() to retrieve the pointer registered by the tdb_open_ex for logging. 2.12.1 Proposed Solution It should simply take an extra argument, since we are prepared to break the API/ABI. 2.12.2 Status Complete. 2.13 Various Callback Functions Are Not Typesafe The callback functions in tdb_set_logging_function (after[tdb_log_func-Doesnt-Take] is resolved), tdb_parse_record, tdb_traverse, tdb_traverse_read and tdb_check all take void * and must internally convert it to the argument type they were expecting. If this type changes, the compiler will not produce warnings on the callers, since it only sees void *. 2.13.1 Proposed Solution With careful use of macros, we can create callback functions which give a warning when used on gcc and the types of the callback and its private argument differ. Unsupported compilers will not give a warning, which is no worse than now. In addition, the callbacks become clearer, as they need not use void * for their parameter. See CCAN's typesafe_cb module at http://ccan.ozlabs.org/info/typesafe_cb.html 2.13.2 Status Complete. 2.14 TDB_CLEAR_IF_FIRST Must Be Specified On All Opens, tdb_reopen_all Problematic The TDB_CLEAR_IF_FIRST flag to tdb_open indicates that the TDB file should be cleared if the caller discovers it is the only process with the TDB open. However, if any caller does not specify TDB_CLEAR_IF_FIRST it will not be detected, so will have the TDB erased underneath them (usually resulting in a crash). There is a similar issue on fork(); if the parent exits (or otherwise closes the tdb) before the child calls tdb_reopen_all() to establish the lock used to indicate the TDB is opened by someone, a TDB_CLEAR_IF_FIRST opener at that moment will believe it alone has opened the TDB and will erase it. 2.14.1 Proposed Solution Remove TDB_CLEAR_IF_FIRST. 
Other workarounds are possible, but see[TDB_CLEAR_IF_FIRST-Imposes-Performance]. 2.14.2 Status Complete. An open hook is provided to replicate this functionality if required. 2.15 Extending The Header Is Difficult We have reserved (zeroed) words in the TDB header, which can be used for future features. If the future features are compulsory, the version number must be updated to prevent old code from accessing the database. But if the future feature is optional, we have no way of telling if older code is accessing the database or not. 2.15.1 Proposed Solution The header should contain a“format variant†value (64-bit). This is divided into two 32-bit parts: 1. The lower part reflects the format variant understood by code accessing the database. 2. The upper part reflects the format variant you must understand to write to the database (otherwise you can only open for reading). The latter field can only be written at creation time, the former should be written under the OPEN_LOCK when opening the database for writing, if the variant of the code is lower than the current lowest variant. This should allow backwards-compatible features to be added, and detection if older code (which doesn't understand the feature) writes to the database. 2.15.2 Status Complete. 2.16 Record Headers Are Not Expandible If we later want to add (say) checksums on keys and data, it would require another format change, which we'd like to avoid. 2.16.1 Proposed Solution We often have extra padding at the tail of a record. If we ensure that the first byte (if any) of this padding is zero, we will have a way for future changes to detect code which doesn't understand a new format: the new code would write (say) a 1 at the tail, and thus if there is no tail or the first byte is 0, we would know the extension is not present on that record. 2.16.2 Status Complete. 
2.17 TDB Does Not Use Talloc Many users of TDB (particularly Samba) use the talloc allocator, and thus have to wrap TDB in a talloc context to use it conveniently. 2.17.1 Proposed Solution The allocation within TDB is not complicated enough to justify the use of talloc, and I am reluctant to force another (excellent) library on TDB users. Nonetheless a compromise is possible. An attribute (see[attributes]) can be added later to tdb_open() to provide an alternate allocation mechanism, specifically for talloc but usable by any other allocator (which would ignore the“context†argument). This would form a talloc heirarchy as expected, but the caller would still have to attach a destructor to the tdb context returned from tdb_open to close it. All TDB_DATA fields would be children of the tdb_context, and the caller would still have to manage them (using talloc_free() or talloc_steal()). 2.17.2 Status Complete, using the NTDB_ATTRIBUTE_ALLOCATOR attribute. 3 Performance And Scalability Issues 3.1 TDB_CLEAR_IF_FIRST Imposes Performance Penalty When TDB_CLEAR_IF_FIRST is specified, a 1-byte read lock is placed at offset 4 (aka. the ACTIVE_LOCK). While these locks never conflict in normal tdb usage, they do add substantial overhead for most fcntl lock implementations when the kernel scans to detect if a lock conflict exists. This is often a single linked list, making the time to acquire and release a fcntl lock O(N) where N is the number of processes with the TDB open, not the number actually doing work. In a Samba server it is common to have huge numbers of clients sitting idle, and thus they have weaned themselves off the TDB_CLEAR_IF_FIRST flag.[footnote: There is a flag to tdb_reopen_all() which is used for this optimization: if the parent process will outlive the child, the child does not need the ACTIVE_LOCK. This is a workaround for this very performance issue. ] 3.1.1 Proposed Solution Remove the flag. 
It was a neat idea, but even trivial servers tend to know when they are initializing for the first time and can simply unlink the old tdb at that point. 3.1.2 Status Complete. 3.2 TDB Files Have a 4G Limit This seems to be becoming an issue (so much for“trivialâ€!), particularly for ldb. 3.2.1 Proposed Solution A new, incompatible TDB format which uses 64 bit offsets internally rather than 32 bit as now. For simplicity of endian conversion (which TDB does on the fly if required), all values will be 64 bit on disk. In practice, some upper bits may be used for other purposes, but at least 56 bits will be available for file offsets. tdb_open() will automatically detect the old version, and even create them if TDB_VERSION6 is specified to tdb_open. 32 bit processes will still be able to access TDBs larger than 4G (assuming that their off_t allows them to seek to 64 bits), they will gracefully fall back as they fail to mmap. This can happen already with large TDBs. Old versions of tdb will fail to open the new TDB files (since 28 August 2009, commit 398d0c29290: prior to that any unrecognized file format would be erased and initialized as a fresh tdb!) 3.2.2 Status Complete. 3.3 TDB Records Have a 4G Limit This has not been a reported problem, and the API uses size_t which can be 64 bit on 64 bit platforms. However, other limits may have made such an issue moot. 3.3.1 Proposed Solution Record sizes will be 64 bit, with an error returned on 32 bit platforms which try to access such records (the current implementation would return TDB_ERR_OOM in a similar case). It seems unlikely that 32 bit keys will be a limitation, so the implementation may not support this (see[sub:Records-Incur-A]). 3.3.2 Status Complete. 3.4 Hash Size Is Determined At TDB Creation Time TDB contains a number of hash chains in the header; the number is specified at creation time, and defaults to 131. 
This is such a bottleneck on large databases (as each hash chain gets quite long), that LDB uses 10,000 for this hash. In general it is impossible to know what the 'right' answer is at database creation time. 3.4.1 Proposed Solution After comprehensive performance testing on various scalable hash variants[footnote: http://rusty.ozlabs.org/?p=89 and http://rusty.ozlabs.org/?p=94 This was annoying because I was previously convinced that an expanding tree of hashes would be very close to optimal. ], it became clear that it is hard to beat a straight linear hash table which doubles in size when it reaches saturation. Unfortunately, altering the hash table introduces serious locking complications: the entire hash table needs to be locked to enlarge the hash table, and others might be holding locks. Particularly insidious are insertions done under tdb_chainlock. Thus an expanding layered hash will be used: an array of hash groups, with each hash group exploding into pointers to lower hash groups once it fills, turning into a hash tree. This has implications for locking: we must lock the entire group in case we need to expand it, yet we don't know how deep the tree is at that point. Note that bits from the hash table entries should be stolen to hold more hash bits to reduce the penalty of collisions. We can use the otherwise-unused lower 3 bits. If we limit the size of the database to 64 exabytes, we can use the top 8 bits of the hash entry as well. These 11 bits would reduce false positives down to 1 in 2000 which is more than we need: we can use one of the bits to indicate that the extra hash bits are valid. This means we can choose not to re-hash all entries when we expand a hash group; simply use the next bits we need and mark them invalid. 3.4.2 Status Ignore. Scaling the hash automatically proved inefficient at small hash sizes; we default to a 8192-element hash (changable via NTDB_ATTRIBUTE_HASHSIZE), and when buckets clash we expand to an array of hash entries. 
This scales slightly better than the tdb chain (due to the 8 top bits containing extra hash). 3.5 TDB Freelist Is Highly Contended TDB uses a single linked list for the free list. Allocation occurs as follows, using heuristics which have evolved over time: 1. Get the free list lock for this whole operation. 2. Multiply length by 1.25, so we always over-allocate by 25%. 3. Set the slack multiplier to 1. 4. Examine the current freelist entry: if it is > length but < the current best case, remember it as the best case. 5. Multiply the slack multiplier by 1.05. 6. If our best fit so far is less than length * slack multiplier, return it. The slack will be turned into a new free record if it's large enough. 7. Otherwise, go onto the next freelist entry. Deleting a record occurs as follows: 1. Lock the hash chain for this whole operation. 2. Walk the chain to find the record, keeping the prev pointer offset. 3. If max_dead is non-zero: (a) Walk the hash chain again and count the dead records. (b) If it's more than max_dead, bulk free all the dead ones (similar to steps 4 and below, but the lock is only obtained once). (c) Simply mark this record as dead and return. 4. Get the free list lock for the remainder of this operation. 5. Examine the following block to see if it is free; if so, enlarge the current block and remove that block from the free list. This was disabled, as removal from the free list was O(entries-in-free-list). 6. Examine the preceeding block to see if it is free: for this reason, each block has a 32-bit tailer which indicates its length. If it is free, expand it to cover our new block and return. 7. Otherwise, prepend ourselves to the free list. Disabling right-merging (step[right-merging]) causes fragmentation; the other heuristics proved insufficient to address this, so the final answer to this was that when we expand the TDB file inside a transaction commit, we repack the entire tdb. 
The single list lock limits our allocation rate; due to the other issues this is not currently seen as a bottleneck. 3.5.1 Proposed Solution The first step is to remove all the current heuristics, as they obviously interact, then examine them once the lock contention is addressed. The free list must be split to reduce contention. Assuming perfect free merging, we can at most have 1 free list entry for each entry. This implies that the number of free lists is related to the size of the hash table, but as it is rare to walk a large number of free list entries we can use far fewer, say 1/32 of the number of hash buckets. It seems tempting to try to reuse the hash implementation which we use for records here, but we have two ways of searching for free entries: for allocation we search by size (and possibly zone) which produces too many clashes for our hash table to handle well, and for coalescing we search by address. Thus an array of doubly-linked free lists seems preferable. There are various benefits in using per-size free lists (see[sub:TDB-Becomes-Fragmented] ) but it's not clear this would reduce contention in the common case where all processes are allocating/freeing the same size. Thus we almost certainly need to divide in other ways: the most obvious is to divide the file into zones, and using a free list (or table of free lists) for each. This approximates address ordering. Unfortunately it is difficult to know what heuristics should be used to determine zone sizes, and our transaction code relies on being able to create a“recovery area†by simply appending to the file (difficult if it would need to create a new zone header). Thus we use a linked-list of free tables; currently we only ever create one, but if there is more than one we choose one at random to use. In future we may use heuristics to add new free tables on contention. We only expand the file when all free tables are exhausted. The basic algorithm is as follows. Freeing is simple: 1. 
Identify the correct free list. 2. Lock the corresponding list. 3. Re-check the list (we didn't have a lock, sizes could have changed): relock if necessary. 4. Place the freed entry in the list. Allocation is a little more complicated, as we perform delayed coalescing at this point: 1. Pick a free table; usually the previous one. 2. Lock the corresponding list. 3. If the top entry is large enough, remove it from the list and return it. 4. Otherwise, coalesce entries in the list. If there was no entry large enough, unlock the list and try the next largest list. 5. If no list has an entry which meets our needs, try the next free table. 6. If no zone satisfies, expand the file. This optimizes rapid insert/delete of free list entries by not coalescing them all the time. First-fit address ordering seems to be fairly good for keeping fragmentation low (see [sub:TDB-Becomes-Fragmented]). Note that address ordering does not need a tailer to coalesce, though if we needed one we could have one cheaply: see [sub:Records-Incur-A]. Each free entry has the free table number in the header: less than 255. It also contains a doubly-linked list for easy deletion. 3.6 TDB Becomes Fragmented Much of this is a result of allocation strategy[footnote: The Memory Fragmentation Problem: Solved? Johnstone & Wilson 1995 ftp://ftp.cs.utexas.edu/pub/garbage/malloc/ismm98.ps ] and deliberate hobbling of coalescing; internal fragmentation (aka overallocation) is deliberately set at 25%, and external fragmentation is only cured by the decision to repack the entire db when a transaction commit needs to enlarge the file. 3.6.1 Proposed Solution The 25% overhead on allocation works in practice for ldb because indexes tend to expand by one record at a time. This internal fragmentation can be resolved by having an "expanded" bit in the header to note entries that have previously expanded, and allocating more space for them. 
There is a spectrum of possible solutions for external fragmentation: one is to use a fragmentation-avoiding allocation strategy such as best-fit address-order allocator. The other end of the spectrum would be to use a bump allocator (very fast and simple) and simply repack the file when we reach the end. There are three problems with efficient fragmentation-avoiding allocators: they are non-trivial, they tend to use a single free list for each size, and there's no evidence that tdb allocation patterns will match those recorded for general allocators (though it seems likely). Thus we don't spend too much effort on external fragmentation; we will be no worse than the current code if we need to repack on occasion. More effort is spent on reducing freelist contention, and reducing overhead. 3.7 Records Incur A 28-Byte Overhead Each TDB record has a header as follows: struct tdb_record { tdb_off_t next; /* offset of the next record in the list */ tdb_len_t rec_len; /* total byte length of record */ tdb_len_t key_len; /* byte length of key */ tdb_len_t data_len; /* byte length of data */ uint32_t full_hash; /* the full 32 bit hash of the key */ uint32_t magic; /* try to catch errors */ /* the following union is implied: union { char record[rec_len]; struct { char key[key_len]; char data[data_len]; } uint32_t totalsize; (tailer) } */ }; Naively, this would double to a 56-byte overhead on a 64 bit implementation. 3.7.1 Proposed Solution We can use various techniques to reduce this for an allocated block: 1. The 'next' pointer is not required, as we are using a flat hash table. 2. 'rec_len' can instead be expressed as an addition to key_len and data_len (it accounts for wasted or overallocated length in the record). Since the record length is always a multiple of 8, we can conveniently fit it in 32 bits (representing up to 35 bits). 3. 'key_len' and 'data_len' can be reduced. 
I'm unwilling to restrict 'data_len' to 32 bits, but instead we can combine the two into one 64-bit field and use a 5 bit value which indicates at what bit to divide the two. Keys are unlikely to scale as fast as data, so I'm assuming a maximum key size of 32 bits. 4. 'full_hash' is used to avoid a memcmp on the "miss" case, but this is diminishing returns after a handful of bits (at 10 bits, it reduces 99.9% of false memcmp). As an aside, as the lower bits are already incorporated in the hash table resolution, the upper bits should be used here. Note that it's not clear that these bits will be a win, given the extra bits in the hash table itself (see [sub:Hash-Size-Solution]). 5. 'magic' does not need to be enlarged: it currently reflects one of 5 values (used, free, dead, recovery, and unused_recovery). It is useful for quick sanity checking however, and should not be eliminated. 6. 'tailer' is only used to coalesce free blocks (so a block to the right can find the header to check if this block is free). This can be replaced by a single 'free' bit in the header of the following block (and the tailer only exists in free blocks).[footnote: This technique from Thomas Standish. Data Structure Techniques. Addison-Wesley, Reading, Massachusetts, 1980. ] The current proposed coalescing algorithm doesn't need this, however. This produces a 16 byte used header like this: struct tdb_used_record { uint32_t used_magic : 16, key_data_divide: 5, top_hash: 11; uint32_t extra_octets; uint64_t key_and_data_len; }; And a free record like this: struct tdb_free_record { uint64_t free_magic: 8, prev : 56; uint64_t free_table: 8, total_length : 56; uint64_t next; }; Note that by limiting valid offsets to 56 bits, we can pack everything we need into 3 64-bit words, meaning our minimum record size is 8 bytes. 3.7.2 Status Complete. 3.8 Transaction Commit Requires 4 fdatasync The current transaction algorithm is: 1. write_recovery_data(); 2. sync(); 3. write_recovery_header(); 4. 
sync(); 5. overwrite_with_new_data(); 6. sync(); 7. remove_recovery_header(); 8. sync(); On current ext3, each sync flushes all data to disk, so the next 3 syncs are relatively expensive. But this could become a performance bottleneck on other filesystems such as ext4. 3.8.1 Proposed Solution Neil Brown points out that this is overzealous, and only one sync is needed: 1. Bundle the recovery data, a transaction counter and a strong checksum of the new data. 2. Strong checksum that whole bundle. 3. Store the bundle in the database. 4. Overwrite the oldest of the two recovery pointers in the header (identified using the transaction counter) with the offset of this bundle. 5. sync. 6. Write the new data to the file. Checking for recovery means identifying the latest bundle with a valid checksum and using the new data checksum to ensure that it has been applied. This is more expensive than the current check, but need only be done at open. For running databases, a separate header field can be used to indicate a transaction in progress; we need only check for recovery if this is set. 3.8.2 Status Deferred. 3.9 TDB Does Not Have Snapshot Support 3.9.1 Proposed Solution None. At some point you say“use a real database†(but see[replay-attribute] ). But as a thought experiment, if we implemented transactions to only overwrite free entries (this is tricky: there must not be a header in each entry which indicates whether it is free, but use of presence in metadata elsewhere), and a pointer to the hash table, we could create an entirely new commit without destroying existing data. Then it would be easy to implement snapshots in a similar way. This would not allow arbitrary changes to the database, such as tdb_repack does, and would require more space (since we have to preserve the current and future entries at once). If we used hash trees rather than one big hash table, we might only have to rewrite some sections of the hash, too. 
We could then implement snapshots using a similar method, using multiple different hash tables/free tables. 3.9.2 Status Deferred. 3.10 Transactions Cannot Operate in Parallel This would be useless for ldb, as it hits the index records with just about every update. It would add significant complexity in resolving clashes, and cause the all transaction callers to write their code to loop in the case where the transactions spuriously failed. 3.10.1 Proposed Solution None (but see[replay-attribute]). We could solve a small part of the problem by providing read-only transactions. These would allow one write transaction to begin, but it could not commit until all r/o transactions are done. This would require a new RO_TRANSACTION_LOCK, which would be upgraded on commit. 3.10.2 Status Deferred. 3.11 Default Hash Function Is Suboptimal The Knuth-inspired multiplicative hash used by tdb is fairly slow (especially if we expand it to 64 bits), and works best when the hash bucket size is a prime number (which also means a slow modulus). In addition, it is highly predictable which could potentially lead to a Denial of Service attack in some TDB uses. 3.11.1 Proposed Solution The Jenkins lookup3 hash[footnote: http://burtleburtle.net/bob/c/lookup3.c ] is a fast and superbly-mixing hash. It's used by the Linux kernel and almost everything else. This has the particular properties that it takes an initial seed, and produces two 32 bit hash numbers, which we can combine into a 64-bit hash. The seed should be created at tdb-creation time from some random source, and placed in the header. This is far from foolproof, but adds a little bit of protection against hash bombing. 3.11.2 Status Complete. 3.12 Reliable Traversal Adds Complexity We lock a record during traversal iteration, and try to grab that lock in the delete code. 
If that grab on delete fails, we simply mark it deleted and continue onwards; traversal checks for this condition and does the delete when it moves off the record. If traversal terminates, the dead record may be left indefinitely. 3.12.1 Proposed Solution Remove reliability guarantees; see[traverse-Proposed-Solution]. 3.12.2 Status Complete. 3.13 Fcntl Locking Adds Overhead Placing a fcntl lock means a system call, as does removing one. This is actually one reason why transactions can be faster (everything is locked once at transaction start). In the uncontended case, this overhead can theoretically be eliminated. 3.13.1 Proposed Solution None. We tried this before with spinlock support, in the early days of TDB, and it didn't make much difference except in manufactured benchmarks. We could use spinlocks (with futex kernel support under Linux), but it means that we lose automatic cleanup when a process dies with a lock. There is a method of auto-cleanup under Linux, but it's not supported by other operating systems. We could reintroduce a clear-if-first-style lock and sweep for dead futexes on open, but that wouldn't help the normal case of one concurrent opener dying. Increasingly elaborate repair schemes could be considered, but they require an ABI change (everyone must use them) anyway, so there's no need to do this at the same time as everything else. 3.14 Some Transactions Don't Require Durability Volker points out that gencache uses a CLEAR_IF_FIRST tdb for normal (fast) usage, and occasionally empties the results into a transactional TDB. This kind of usage prioritizes performance over durability: as long as we are consistent, data can be lost. This would be more neatly implemented inside tdb: a“soft†transaction commit (ie. syncless) which meant that data may be reverted on a crash. 3.14.1 Proposed Solution None. Unfortunately any transaction scheme which overwrites old data requires a sync before that overwrite to avoid the possibility of corruption. 
It seems possible to use a scheme similar to that described in[sub:TDB-Does-Not] ,where transactions are committed without overwriting existing data, and an array of top-level pointers were available in the header. If the transaction is“soft†then we would not need a sync at all: existing processes would pick up the new hash table and free list and work with that. At some later point, a sync would allow recovery of the old data into the free lists (perhaps when the array of top-level pointers filled). On crash, tdb_open() would examine the array of top levels, and apply the transactions until it encountered an invalid checksum. 3.15 Tracing Is Fragile, Replay Is External The current TDB has compile-time-enabled tracing code, but it often breaks as it is not enabled by default. In a similar way, the ctdb code has an external wrapper which does replay tracing so it can coordinate cluster-wide transactions. 3.15.1 Proposed Solution Tridge points out that an attribute can be later added to tdb_open (see[attributes]) to provide replay/trace hooks, which could become the basis for this and future parallel transactions and snapshot support. 3.15.2 Status Deferred. ntdb-1.0/free.c000066400000000000000000000630741224151530700134000ustar00rootroot00000000000000 /* Trivial Database 2: free list/block handling Copyright (C) Rusty Russell 2010 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/
#include "private.h"
/* NOTE(review): the four header names below were lost in extraction (the
 * angle-bracket contents were stripped) -- restore from upstream free.c
 * before compiling. */
#include
#include
#include
#include

/* Thin wrapper: index of the highest set bit, via ccan's ilog64(). */
static unsigned fls64(uint64_t val)
{
	return ilog64(val);
}

/* In which bucket would we find a particular record size? (ignoring header) */
unsigned int size_to_bucket(ntdb_len_t data_len)
{
	unsigned int bucket;

	/* We can't have records smaller than this. */
	assert(data_len >= NTDB_MIN_DATA_LEN);

	/* Ignoring the header... */
	if (data_len - NTDB_MIN_DATA_LEN <= 64) {
		/* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
		bucket = (data_len - NTDB_MIN_DATA_LEN) / 8;
	} else {
		/* After that we go power of 2. */
		bucket = fls64(data_len - NTDB_MIN_DATA_LEN) + 2;
	}

	/* Everything too big lands in the last (catch-all) bucket. */
	if (unlikely(bucket >= NTDB_FREE_BUCKETS))
		bucket = NTDB_FREE_BUCKETS - 1;
	return bucket;
}

/* Offset of the first free table, read from the header. */
ntdb_off_t first_ftable(struct ntdb_context *ntdb)
{
	return ntdb_read_off(ntdb, offsetof(struct ntdb_header, free_table));
}

/* Offset of the free table after 'ftable' in the chain (callers treat
 * 0 as end-of-chain). */
ntdb_off_t next_ftable(struct ntdb_context *ntdb, ntdb_off_t ftable)
{
	return ntdb_read_off(ntdb, ftable + offsetof(struct ntdb_freetable,next));
}

/* Pick which free table this context will use; returns NTDB_SUCCESS or an
 * error from reading the chain. */
enum NTDB_ERROR ntdb_ftable_init(struct ntdb_context *ntdb)
{
	/* Use reservoir sampling algorithm to select a free list at random. */
	unsigned int rnd, max = 0, count = 0;
	ntdb_off_t off;

	ntdb->ftable_off = off = first_ftable(ntdb);
	ntdb->ftable = 0;

	while (off) {
		if (NTDB_OFF_IS_ERR(off)) {
			return NTDB_OFF_TO_ERR(off);
		}

		/* Keep the table that drew the largest random number. */
		rnd = random();
		if (rnd >= max) {
			ntdb->ftable_off = off;
			ntdb->ftable = count;
			max = rnd;
		}

		off = next_ftable(ntdb, off);
		count++;
	}
	return NTDB_SUCCESS;
}

/* Offset of a given bucket. */
ntdb_off_t bucket_off(ntdb_off_t ftable_off, unsigned bucket)
{
	return ftable_off + offsetof(struct ntdb_freetable, buckets)
		+ bucket * sizeof(ntdb_off_t);
}

/* Returns free_buckets + 1, or list number to search, or -ve error. */
static ntdb_off_t find_free_head(struct ntdb_context *ntdb,
				 ntdb_off_t ftable_off,
				 ntdb_off_t bucket)
{
	/* Speculatively search for a non-zero bucket. */
	return ntdb_find_nonzero_off(ntdb, bucket_off(ftable_off, 0),
				     bucket, NTDB_FREE_BUCKETS);
}

/* Debug-only consistency check of a bucket's circular doubly-linked free
 * list; compiles to a no-op unless CCAN_NTDB_DEBUG is defined. */
static void check_list(struct ntdb_context *ntdb, ntdb_off_t b_off)
{
#ifdef CCAN_NTDB_DEBUG
	ntdb_off_t off, prev = 0, first;
	struct ntdb_free_record r;

	first = off = (ntdb_read_off(ntdb, b_off) & NTDB_OFF_MASK);
	while (off != 0) {
		ntdb_read_convert(ntdb, off, &r, sizeof(r));
		if (frec_magic(&r) != NTDB_FREE_MAGIC)
			abort();
		if (prev && frec_prev(&r) != prev)
			abort();
		prev = off;
		off = r.next;
	}

	/* The head's prev must point back at the tail. */
	if (first) {
		ntdb_read_convert(ntdb, first, &r, sizeof(r));
		if (frec_prev(&r) != prev)
			abort();
	}
#endif
}

/* Remove from free bucket.  Caller holds the bucket lock; 'r' is the
 * already-read free record at 'r_off' in bucket 'b_off'. */
static enum NTDB_ERROR remove_from_list(struct ntdb_context *ntdb,
					ntdb_off_t b_off, ntdb_off_t r_off,
					const struct ntdb_free_record *r)
{
	ntdb_off_t off, prev_next, head;
	enum NTDB_ERROR ecode;

	/* Is this only element in list?  Zero out bucket, and we're done. */
	if (frec_prev(r) == r_off)
		return ntdb_write_off(ntdb, b_off, 0);

	/* off = &r->prev->next */
	off = frec_prev(r) + offsetof(struct ntdb_free_record, next);

	/* Get prev->next */
	prev_next = ntdb_read_off(ntdb, off);
	if (NTDB_OFF_IS_ERR(prev_next))
		return NTDB_OFF_TO_ERR(prev_next);

	/* If prev->next == 0, we were head: update bucket to point to next. */
	if (prev_next == 0) {
		/* We must preserve upper bits (the enqueue counter). */
		head = ntdb_read_off(ntdb, b_off);
		if (NTDB_OFF_IS_ERR(head))
			return NTDB_OFF_TO_ERR(head);

		if ((head & NTDB_OFF_MASK) != r_off) {
			return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
					   NTDB_LOG_ERROR,
					   "remove_from_list:"
					   " %llu head %llu on list %llu",
					   (long long)r_off,
					   (long long)head,
					   (long long)b_off);
		}
		head = ((head & ~NTDB_OFF_MASK) | r->next);
		ecode = ntdb_write_off(ntdb, b_off, head);
		if (ecode != NTDB_SUCCESS)
			return ecode;
	} else {
		/* r->prev->next = r->next */
		ecode = ntdb_write_off(ntdb, off, r->next);
		if (ecode != NTDB_SUCCESS)
			return ecode;
	}

	/* If we were the tail, off = &head->prev. */
	if (r->next == 0) {
		head = ntdb_read_off(ntdb, b_off);
		if (NTDB_OFF_IS_ERR(head))
			return NTDB_OFF_TO_ERR(head);
		head &= NTDB_OFF_MASK;
		off = head + offsetof(struct ntdb_free_record, magic_and_prev);
	} else {
		/* off = &r->next->prev */
		off = r->next + offsetof(struct ntdb_free_record,
					 magic_and_prev);
	}

#ifdef CCAN_NTDB_DEBUG
	/* *off == r */
	if ((ntdb_read_off(ntdb, off) & NTDB_OFF_MASK) != r_off) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "remove_from_list:"
				   " %llu bad prev in list %llu",
				   (long long)r_off, (long long)b_off);
	}
#endif
	/* r->next->prev = r->prev */
	return ntdb_write_off(ntdb, off, r->magic_and_prev);
}

/* Enqueue in this free bucket: sets coalesce if we've added 128
 * entries to it. */
static enum NTDB_ERROR enqueue_in_free(struct ntdb_context *ntdb,
				       ntdb_off_t b_off,
				       ntdb_off_t off,
				       ntdb_len_t len,
				       bool *coalesce)
{
	struct ntdb_free_record new;
	enum NTDB_ERROR ecode;
	ntdb_off_t prev, head;
	uint64_t magic = (NTDB_FREE_MAGIC << (64 - NTDB_OFF_UPPER_STEAL));

	head = ntdb_read_off(ntdb, b_off);
	if (NTDB_OFF_IS_ERR(head))
		return NTDB_OFF_TO_ERR(head);

	/* NOTE(review): the comment below looks stale -- this *is*
	 * enqueue_in_free; presumably it predates a refactor.  Verify
	 * against upstream history. */
	/* We only need to set ftable_and_len; rest is set in enqueue_in_free */
	new.ftable_and_len = ((uint64_t)ntdb->ftable
			      << (64 - NTDB_OFF_UPPER_STEAL)) | len;

	/* new->next = head. */
	new.next = (head & NTDB_OFF_MASK);

	/* First element?  Prev points to ourselves. */
	if (!new.next) {
		new.magic_and_prev = (magic | off);
	} else {
		/* new->prev = next->prev */
		prev = ntdb_read_off(ntdb,
				     new.next
				     + offsetof(struct ntdb_free_record,
						magic_and_prev));
		new.magic_and_prev = prev;
		if (frec_magic(&new) != NTDB_FREE_MAGIC) {
			return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
					   NTDB_LOG_ERROR,
					   "enqueue_in_free: %llu bad head"
					   " prev %llu",
					   (long long)new.next,
					   (long long)prev);
		}
		/* next->prev = new. */
		ecode = ntdb_write_off(ntdb, new.next
				       + offsetof(struct ntdb_free_record,
						  magic_and_prev),
				       off | magic);
		if (ecode != NTDB_SUCCESS) {
			return ecode;
		}

#ifdef CCAN_NTDB_DEBUG
		prev = ntdb_read_off(ntdb, frec_prev(&new)
				     + offsetof(struct ntdb_free_record,
						next));
		if (prev != 0) {
			return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
					   NTDB_LOG_ERROR,
					   "enqueue_in_free:"
					   " %llu bad tail next ptr %llu",
					   (long long)frec_prev(&new)
					   + offsetof(struct ntdb_free_record,
						      next),
					   (long long)prev);
		}
#endif
	}

	/* Update enqueue count, but don't set high bit: see NTDB_OFF_IS_ERR */
	if (*coalesce)
		head += (1ULL << (64 - NTDB_OFF_UPPER_STEAL));
	head &= ~(NTDB_OFF_MASK | (1ULL << 63));
	head |= off;
	ecode = ntdb_write_off(ntdb, b_off, head);
	if (ecode != NTDB_SUCCESS) {
		return ecode;
	}

	/* It's time to coalesce if counter wrapped. */
	if (*coalesce)
		*coalesce = ((head & ~NTDB_OFF_MASK) == 0);

	return ntdb_write_convert(ntdb, off, &new, sizeof(new));
}

/* Offset of free table number 'ftable'; fast path when it's the one this
 * context is already using, otherwise walk the chain from the start. */
static ntdb_off_t ftable_offset(struct ntdb_context *ntdb, unsigned int ftable)
{
	ntdb_off_t off;
	unsigned int i;

	if (likely(ntdb->ftable == ftable))
		return ntdb->ftable_off;

	off = first_ftable(ntdb);
	for (i = 0; i < ftable; i++) {
		/* Propagate a read error as the returned offset. */
		if (NTDB_OFF_IS_ERR(off)) {
			break;
		}
		off = next_ftable(ntdb, off);
	}
	return off;
}

/* Note: we unlock the current bucket if fail (-ve), or coalesce (+ve) and
 * need to blatt the *protect record (which is set to an error).
*/
/* Merge the free record at 'off' (length 'data_len', on bucket 'b_off',
 * which the caller holds locked) with any free neighbours to its right.
 * Returns 0 if nothing was merged, the new usable length on success, or a
 * -ve error. */
static ntdb_len_t coalesce(struct ntdb_context *ntdb,
			   ntdb_off_t off, ntdb_off_t b_off,
			   ntdb_len_t data_len,
			   ntdb_off_t *protect)
{
	ntdb_off_t end;
	struct ntdb_free_record rec;
	enum NTDB_ERROR ecode;

	ntdb->stats.alloc_coalesce_tried++;
	/* 'end' walks right over each adjacent free record we absorb. */
	end = off + sizeof(struct ntdb_used_record) + data_len;

	while (end < ntdb->file->map_size) {
		const struct ntdb_free_record *r;
		ntdb_off_t nb_off;
		unsigned ftable, bucket;

		r = ntdb_access_read(ntdb, end, sizeof(*r), true);
		if (NTDB_PTR_IS_ERR(r)) {
			ecode = NTDB_PTR_ERR(r);
			goto err;
		}

		if (frec_magic(r) != NTDB_FREE_MAGIC
		    || frec_ftable(r) == NTDB_FTABLE_NONE) {
			ntdb_access_release(ntdb, r);
			break;
		}

		ftable = frec_ftable(r);
		bucket = size_to_bucket(frec_len(r));
		nb_off = ftable_offset(ntdb, ftable);
		if (NTDB_OFF_IS_ERR(nb_off)) {
			ntdb_access_release(ntdb, r);
			ecode = NTDB_OFF_TO_ERR(nb_off);
			goto err;
		}
		nb_off = bucket_off(nb_off, bucket);
		ntdb_access_release(ntdb, r);

		/* We may be violating lock order here, so best effort. */
		if (ntdb_lock_free_bucket(ntdb, nb_off, NTDB_LOCK_NOWAIT)
		    != NTDB_SUCCESS) {
			ntdb->stats.alloc_coalesce_lockfail++;
			break;
		}

		/* Now we have lock, re-check. */
		ecode = ntdb_read_convert(ntdb, end, &rec, sizeof(rec));
		if (ecode != NTDB_SUCCESS) {
			ntdb_unlock_free_bucket(ntdb, nb_off);
			goto err;
		}

		if (unlikely(frec_magic(&rec) != NTDB_FREE_MAGIC)) {
			ntdb->stats.alloc_coalesce_race++;
			ntdb_unlock_free_bucket(ntdb, nb_off);
			break;
		}

		if (unlikely(frec_ftable(&rec) != ftable)
		    || unlikely(size_to_bucket(frec_len(&rec)) != bucket)) {
			ntdb->stats.alloc_coalesce_race++;
			ntdb_unlock_free_bucket(ntdb, nb_off);
			break;
		}

		/* Did we just mess up a record you were hoping to use? */
		if (end == *protect) {
			ntdb->stats.alloc_coalesce_iterate_clash++;
			*protect = NTDB_ERR_TO_OFF(NTDB_ERR_NOEXIST);
		}

		ecode = remove_from_list(ntdb, nb_off, end, &rec);
		check_list(ntdb, nb_off);
		if (ecode != NTDB_SUCCESS) {
			ntdb_unlock_free_bucket(ntdb, nb_off);
			goto err;
		}

		end += sizeof(struct ntdb_used_record) + frec_len(&rec);
		ntdb_unlock_free_bucket(ntdb, nb_off);
		ntdb->stats.alloc_coalesce_num_merged++;
	}

	/* Didn't find any adjacent free? */
	if (end == off + sizeof(struct ntdb_used_record) + data_len)
		return 0;

	/* Before we expand, check this isn't one you wanted protected? */
	if (off == *protect) {
		*protect = NTDB_ERR_TO_OFF(NTDB_ERR_EXISTS);
		ntdb->stats.alloc_coalesce_iterate_clash++;
	}

	/* OK, expand initial record */
	ecode = ntdb_read_convert(ntdb, off, &rec, sizeof(rec));
	if (ecode != NTDB_SUCCESS) {
		goto err;
	}

	if (frec_len(&rec) != data_len) {
		ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				    "coalesce: expected data len %zu not %zu",
				    (size_t)data_len, (size_t)frec_len(&rec));
		goto err;
	}

	ecode = remove_from_list(ntdb, b_off, off, &rec);
	check_list(ntdb, b_off);
	if (ecode != NTDB_SUCCESS) {
		goto err;
	}

	/* Try locking violation first.  We don't allow coalesce recursion! */
	ecode = add_free_record(ntdb, off, end - off, NTDB_LOCK_NOWAIT, false);
	if (ecode != NTDB_SUCCESS) {
		/* Need to drop lock.  Can't rely on anything stable. */
		ntdb->stats.alloc_coalesce_lockfail++;
		*protect = NTDB_ERR_TO_OFF(NTDB_ERR_CORRUPT);

		/* We have to drop this to avoid deadlocks, so make sure record
		 * doesn't get coalesced by someone else! */
		rec.ftable_and_len = (NTDB_FTABLE_NONE
				      << (64 - NTDB_OFF_UPPER_STEAL))
			| (end - off - sizeof(struct ntdb_used_record));
		ecode = ntdb_write_off(ntdb,
				       off + offsetof(struct ntdb_free_record,
						      ftable_and_len),
				       rec.ftable_and_len);
		if (ecode != NTDB_SUCCESS) {
			goto err;
		}

		ntdb_unlock_free_bucket(ntdb, b_off);

		ecode = add_free_record(ntdb, off, end - off, NTDB_LOCK_WAIT,
					false);
		if (ecode != NTDB_SUCCESS) {
			return NTDB_ERR_TO_OFF(ecode);
		}
	} else if (NTDB_OFF_IS_ERR(*protect)) {
		/* For simplicity, we always drop lock if they can't continue */
		ntdb_unlock_free_bucket(ntdb, b_off);
	}
	ntdb->stats.alloc_coalesce_succeeded++;

	/* Return usable length. */
	return end - off - sizeof(struct ntdb_used_record);

err:
	/* To unify error paths, we *always* unlock bucket on error. */
	ntdb_unlock_free_bucket(ntdb, b_off);
	return NTDB_ERR_TO_OFF(ecode);
}

/* List is locked: we unlock it.  Coalesces up to 'limit' records at the
 * head of bucket 'b_off', then rotates the survivors to the list tail. */
static enum NTDB_ERROR coalesce_list(struct ntdb_context *ntdb,
				     ntdb_off_t ftable_off,
				     ntdb_off_t b_off,
				     unsigned int limit)
{
	enum NTDB_ERROR ecode;
	ntdb_off_t off;

	off = ntdb_read_off(ntdb, b_off);
	if (NTDB_OFF_IS_ERR(off)) {
		ecode = NTDB_OFF_TO_ERR(off);
		goto unlock_err;
	}
	/* A little bit of paranoia: counter should be 0. */
	off &= NTDB_OFF_MASK;

	while (off && limit--) {
		struct ntdb_free_record rec;
		ntdb_len_t coal;
		ntdb_off_t next;

		ecode = ntdb_read_convert(ntdb, off, &rec, sizeof(rec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		next = rec.next;
		coal = coalesce(ntdb, off, b_off, frec_len(&rec), &next);
		if (NTDB_OFF_IS_ERR(coal)) {
			/* This has already unlocked on error. */
			return NTDB_OFF_TO_ERR(coal);
		}
		if (NTDB_OFF_IS_ERR(next)) {
			/* Coalescing had to unlock, so stop. */
			return NTDB_SUCCESS;
		}
		/* Keep going if we're doing well... */
		limit += size_to_bucket(coal / 16 + NTDB_MIN_DATA_LEN);
		off = next;
	}

	/* Now, move those elements to the tail of the list so we get something
	 * else next time. */
	if (off) {
		struct ntdb_free_record oldhrec, newhrec, oldtrec, newtrec;
		ntdb_off_t oldhoff, oldtoff, newtoff;

		/* The record we were up to is the new head. */
		ecode = ntdb_read_convert(ntdb, off, &newhrec,
					  sizeof(newhrec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		/* Get the new tail. */
		newtoff = frec_prev(&newhrec);
		ecode = ntdb_read_convert(ntdb, newtoff, &newtrec,
					  sizeof(newtrec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		/* Get the old head. */
		oldhoff = ntdb_read_off(ntdb, b_off);
		if (NTDB_OFF_IS_ERR(oldhoff)) {
			ecode = NTDB_OFF_TO_ERR(oldhoff);
			goto unlock_err;
		}

		/* This could happen if they all coalesced away. */
		if (oldhoff == off)
			goto out;

		ecode = ntdb_read_convert(ntdb, oldhoff, &oldhrec,
					  sizeof(oldhrec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		/* Get the old tail. */
		oldtoff = frec_prev(&oldhrec);
		ecode = ntdb_read_convert(ntdb, oldtoff, &oldtrec,
					  sizeof(oldtrec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		/* Old tail's next points to old head. */
		oldtrec.next = oldhoff;

		/* Old head's prev points to old tail. */
		oldhrec.magic_and_prev = (NTDB_FREE_MAGIC
					  << (64 - NTDB_OFF_UPPER_STEAL))
			| oldtoff;

		/* New tail's next is 0. */
		newtrec.next = 0;

		/* Write out the modified versions. */
		ecode = ntdb_write_convert(ntdb, oldtoff, &oldtrec,
					   sizeof(oldtrec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		ecode = ntdb_write_convert(ntdb, oldhoff, &oldhrec,
					   sizeof(oldhrec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		ecode = ntdb_write_convert(ntdb, newtoff, &newtrec,
					   sizeof(newtrec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		/* And finally link in new head. */
		ecode = ntdb_write_off(ntdb, b_off, off);
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;
	}
out:
	ntdb_unlock_free_bucket(ntdb, b_off);
	return NTDB_SUCCESS;

unlock_err:
	ntdb_unlock_free_bucket(ntdb, b_off);
	return ecode;
}

/* List must not be locked if coalesce_ok is set.
*/
/* Put 'len_with_header' bytes at 'off' onto the free list for its size
 * bucket; may trigger delayed coalescing of that bucket. */
enum NTDB_ERROR add_free_record(struct ntdb_context *ntdb,
				ntdb_off_t off, ntdb_len_t len_with_header,
				enum ntdb_lock_flags waitflag,
				bool coalesce_ok)
{
	ntdb_off_t b_off;
	ntdb_len_t len;
	enum NTDB_ERROR ecode;

	assert(len_with_header >= sizeof(struct ntdb_free_record));

	/* Bucket is chosen by usable (post-header) length. */
	len = len_with_header - sizeof(struct ntdb_used_record);

	b_off = bucket_off(ntdb->ftable_off, size_to_bucket(len));
	ecode = ntdb_lock_free_bucket(ntdb, b_off, waitflag);
	if (ecode != NTDB_SUCCESS) {
		return ecode;
	}

	/* enqueue_in_free may flip coalesce_ok on when the bucket's
	 * enqueue counter wraps. */
	ecode = enqueue_in_free(ntdb, b_off, off, len, &coalesce_ok);
	check_list(ntdb, b_off);

	/* Coalescing unlocks free list. */
	if (!ecode && coalesce_ok)
		ecode = coalesce_list(ntdb, ntdb->ftable_off, b_off, 2);
	else
		ntdb_unlock_free_bucket(ntdb, b_off);

	return ecode;
}

/* Minimum space needed for key + data, rounded up to 8-byte alignment. */
static size_t adjust_size(size_t keylen, size_t datalen)
{
	size_t size = keylen + datalen;

	if (size < NTDB_MIN_DATA_LEN)
		size = NTDB_MIN_DATA_LEN;

	/* Round to next uint64_t boundary. */
	return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}

/* If we have enough left over to be useful, split that off. */
static size_t record_leftover(size_t keylen, size_t datalen,
			      bool want_extra, size_t total_len)
{
	ssize_t leftover;

	if (want_extra)
		datalen += datalen / 2;
	leftover = total_len - adjust_size(keylen, datalen);

	/* Too small to hold a free record: don't split. */
	if (leftover < (ssize_t)sizeof(struct ntdb_free_record))
		return 0;

	return leftover;
}

/* We need size bytes to put our key and data in.  Best-fit search of one
 * bucket, getting progressively less fussy; returns the allocated offset,
 * 0 if the bucket had nothing suitable, or -ve error. */
static ntdb_off_t lock_and_alloc(struct ntdb_context *ntdb,
				 ntdb_off_t ftable_off,
				 ntdb_off_t bucket,
				 size_t keylen, size_t datalen,
				 bool want_extra,
				 unsigned magic)
{
	ntdb_off_t off, b_off, best_off;
	struct ntdb_free_record best = { 0 };
	double multiplier;
	size_t size = adjust_size(keylen, datalen);
	enum NTDB_ERROR ecode;

	ntdb->stats.allocs++;
	b_off = bucket_off(ftable_off, bucket);

	/* FIXME: Try non-blocking wait first, to measure contention. */
	/* Lock this bucket. */
	ecode = ntdb_lock_free_bucket(ntdb, b_off, NTDB_LOCK_WAIT);
	if (ecode != NTDB_SUCCESS) {
		return NTDB_ERR_TO_OFF(ecode);
	}

	/* -1 length means "anything beats it" for the best-fit scan. */
	best.ftable_and_len = -1ULL;
	best_off = 0;

	/* Get slack if we're after extra. */
	if (want_extra)
		multiplier = 1.5;
	else
		multiplier = 1.0;

	/* Walk the list to see if any are large enough, getting less fussy
	 * as we go. */
	off = ntdb_read_off(ntdb, b_off);
	if (NTDB_OFF_IS_ERR(off)) {
		ecode = NTDB_OFF_TO_ERR(off);
		goto unlock_err;
	}
	off &= NTDB_OFF_MASK;

	while (off) {
		const struct ntdb_free_record *r;
		ntdb_off_t next;

		r = ntdb_access_read(ntdb, off, sizeof(*r), true);
		if (NTDB_PTR_IS_ERR(r)) {
			ecode = NTDB_PTR_ERR(r);
			goto unlock_err;
		}

		if (frec_magic(r) != NTDB_FREE_MAGIC) {
			ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
					    NTDB_LOG_ERROR,
					    "lock_and_alloc:"
					    " %llu non-free 0x%llx",
					    (long long)off,
					    (long long)r->magic_and_prev);
			ntdb_access_release(ntdb, r);
			goto unlock_err;
		}

		if (frec_len(r) >= size && frec_len(r) < frec_len(&best)) {
			best_off = off;
			best = *r;
		}

		/* Accept the best so far once it's within the (growing)
		 * slack factor of the requested size. */
		if (frec_len(&best) <= size * multiplier && best_off) {
			ntdb_access_release(ntdb, r);
			break;
		}

		multiplier *= 1.01;

		next = r->next;
		ntdb_access_release(ntdb, r);
		off = next;
	}

	/* If we found anything at all, use it. */
	if (best_off) {
		struct ntdb_used_record rec;
		size_t leftover;

		/* We're happy with this size: take it. */
		ecode = remove_from_list(ntdb, b_off, best_off, &best);
		check_list(ntdb, b_off);
		if (ecode != NTDB_SUCCESS) {
			goto unlock_err;
		}

		leftover = record_leftover(keylen, datalen, want_extra,
					   frec_len(&best));

		assert(keylen + datalen + leftover <= frec_len(&best));
		/* We need to mark non-free before we drop lock, otherwise
		 * coalesce() could try to merge it! */
		ecode = set_header(ntdb, &rec, magic, keylen, datalen,
				   frec_len(&best) - leftover);
		if (ecode != NTDB_SUCCESS) {
			goto unlock_err;
		}

		ecode = ntdb_write_convert(ntdb, best_off, &rec, sizeof(rec));
		if (ecode != NTDB_SUCCESS) {
			goto unlock_err;
		}

		/* For futureproofing, we put a 0 in any unused space. */
		if (rec_extra_padding(&rec)) {
			ecode = ntdb->io->twrite(ntdb, best_off + sizeof(rec)
						 + keylen + datalen, "", 1);
			if (ecode != NTDB_SUCCESS) {
				goto unlock_err;
			}
		}

		/* Bucket of leftover will be <= current bucket, so nested
		 * locking is allowed. */
		if (leftover) {
			ntdb->stats.alloc_leftover++;
			ecode = add_free_record(ntdb,
						best_off + sizeof(rec)
						+ frec_len(&best) - leftover,
						leftover, NTDB_LOCK_WAIT,
						false);
			if (ecode != NTDB_SUCCESS) {
				best_off = NTDB_ERR_TO_OFF(ecode);
			}
		}
		ntdb_unlock_free_bucket(ntdb, b_off);

		return best_off;
	}

	ntdb_unlock_free_bucket(ntdb, b_off);
	return 0;

unlock_err:
	ntdb_unlock_free_bucket(ntdb, b_off);
	return NTDB_ERR_TO_OFF(ecode);
}

/* Get a free block from current free list, or 0 if none, -ve on error. */
static ntdb_off_t get_free(struct ntdb_context *ntdb,
			   size_t keylen, size_t datalen,
			   bool want_extra,
			   unsigned magic)
{
	ntdb_off_t off, ftable_off;
	ntdb_off_t start_b, b, ftable;
	bool wrapped = false;

	/* If they are growing, add 50% to get to higher bucket. */
	if (want_extra)
		start_b = size_to_bucket(adjust_size(keylen,
						     datalen + datalen / 2));
	else
		start_b = size_to_bucket(adjust_size(keylen, datalen));

	ftable_off = ntdb->ftable_off;
	ftable = ntdb->ftable;
	while (!wrapped || ftable_off != ntdb->ftable_off) {
		/* Start at exact size bucket, and search up... */
		for (b = find_free_head(ntdb, ftable_off, start_b);
		     b < NTDB_FREE_BUCKETS;
		     b = find_free_head(ntdb, ftable_off, b + 1)) {
			/* Try getting one from list. */
			off = lock_and_alloc(ntdb, ftable_off,
					     b, keylen, datalen, want_extra,
					     magic);
			if (NTDB_OFF_IS_ERR(off))
				return off;
			if (off != 0) {
				if (b == start_b)
					ntdb->stats.alloc_bucket_exact++;
				if (b == NTDB_FREE_BUCKETS - 1)
					ntdb->stats.alloc_bucket_max++;
				/* Worked?  Stay using this list. */
				ntdb->ftable_off = ftable_off;
				ntdb->ftable = ftable;
				return off;
			}
			/* Didn't work.  Try next bucket. */
		}

		if (NTDB_OFF_IS_ERR(b)) {
			return b;
		}

		/* Hmm, try next table.
*/ ftable_off = next_ftable(ntdb, ftable_off); if (NTDB_OFF_IS_ERR(ftable_off)) { return ftable_off; } ftable++; if (ftable_off == 0) { wrapped = true; ftable_off = first_ftable(ntdb); if (NTDB_OFF_IS_ERR(ftable_off)) { return ftable_off; } ftable = 0; } } return 0; } enum NTDB_ERROR set_header(struct ntdb_context *ntdb, struct ntdb_used_record *rec, unsigned magic, uint64_t keylen, uint64_t datalen, uint64_t actuallen) { uint64_t keybits = (fls64(keylen) + 1) / 2; rec->magic_and_meta = ((actuallen - (keylen + datalen)) << 11) | (keybits << 43) | ((uint64_t)magic << 48); rec->key_and_data_len = (keylen | (datalen << (keybits*2))); /* Encoding can fail on big values. */ if (rec_key_length(rec) != keylen || rec_data_length(rec) != datalen || rec_extra_padding(rec) != actuallen - (keylen + datalen)) { return ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "Could not encode k=%llu,d=%llu,a=%llu", (long long)keylen, (long long)datalen, (long long)actuallen); } return NTDB_SUCCESS; } /* You need 'size', this tells you how much you should expand by. */ ntdb_off_t ntdb_expand_adjust(ntdb_off_t map_size, ntdb_off_t size) { ntdb_off_t new_size, top_size; /* limit size in order to avoid using up huge amounts of memory for * in memory tdbs if an oddball huge record creeps in */ if (size > 100 * 1024) { top_size = map_size + size * 2; } else { top_size = map_size + size * 100; } /* always make room for at least top_size more records, and at least 25% more space. if the DB is smaller than 100MiB, otherwise grow it by 10% only. */ if (map_size > 100 * 1024 * 1024) { new_size = map_size * 1.10; } else { new_size = map_size * 1.25; } if (new_size < top_size) new_size = top_size; /* We always make the file a multiple of transaction page * size. This guarantees that the transaction recovery area * is always aligned, otherwise the transaction code can overwrite * itself. */ new_size = (new_size + NTDB_PGSIZE-1) & ~(NTDB_PGSIZE-1); return new_size - map_size; } /* Expand the database. 
*/ static enum NTDB_ERROR ntdb_expand(struct ntdb_context *ntdb, ntdb_len_t size) { uint64_t old_size; ntdb_len_t wanted; enum NTDB_ERROR ecode; /* Need to hold a hash lock to expand DB: transactions rely on it. */ if (!(ntdb->flags & NTDB_NOLOCK) && !ntdb->file->allrecord_lock.count && !ntdb_has_hash_locks(ntdb)) { return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_expand: must hold lock during expand"); } /* Only one person can expand file at a time. */ ecode = ntdb_lock_expand(ntdb, F_WRLCK); if (ecode != NTDB_SUCCESS) { return ecode; } /* Someone else may have expanded the file, so retry. */ old_size = ntdb->file->map_size; ntdb_oob(ntdb, ntdb->file->map_size, 1, true); if (ntdb->file->map_size != old_size) { ntdb_unlock_expand(ntdb, F_WRLCK); return NTDB_SUCCESS; } /* We need room for the record header too. */ size = adjust_size(0, sizeof(struct ntdb_used_record) + size); /* Overallocate. */ wanted = ntdb_expand_adjust(old_size, size); ecode = ntdb->io->expand_file(ntdb, wanted); if (ecode != NTDB_SUCCESS) { ntdb_unlock_expand(ntdb, F_WRLCK); return ecode; } /* We need to drop this lock before adding free record. */ ntdb_unlock_expand(ntdb, F_WRLCK); ntdb->stats.expands++; return add_free_record(ntdb, old_size, wanted, NTDB_LOCK_WAIT, true); } /* This won't fail: it will expand the database if it has to. 
*/ ntdb_off_t alloc(struct ntdb_context *ntdb, size_t keylen, size_t datalen, unsigned magic, bool growing) { ntdb_off_t off; for (;;) { enum NTDB_ERROR ecode; off = get_free(ntdb, keylen, datalen, growing, magic); if (likely(off != 0)) break; ecode = ntdb_expand(ntdb, adjust_size(keylen, datalen)); if (ecode != NTDB_SUCCESS) { return NTDB_ERR_TO_OFF(ecode); } } return off; } ntdb-1.0/hash.c000066400000000000000000000372761224151530700134070ustar00rootroot00000000000000 /* Trivial Database 2: hash handling Copyright (C) Rusty Russell 2010 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "private.h" #include /* Default hash function. 
*/ uint32_t ntdb_jenkins_hash(const void *key, size_t length, uint32_t seed, void *unused) { return hash_stable((const unsigned char *)key, length, seed); } uint32_t ntdb_hash(struct ntdb_context *ntdb, const void *ptr, size_t len) { return ntdb->hash_fn(ptr, len, ntdb->hash_seed, ntdb->hash_data); } static ntdb_bool_err key_matches(struct ntdb_context *ntdb, const struct ntdb_used_record *rec, ntdb_off_t off, const NTDB_DATA *key, const char **rptr) { ntdb_bool_err ret = false; const char *rkey; if (rec_key_length(rec) != key->dsize) { ntdb->stats.compare_wrong_keylen++; return ret; } rkey = ntdb_access_read(ntdb, off + sizeof(*rec), key->dsize + rec_data_length(rec), false); if (NTDB_PTR_IS_ERR(rkey)) { return (ntdb_bool_err)NTDB_PTR_ERR(rkey); } if (memcmp(rkey, key->dptr, key->dsize) == 0) { if (rptr) { *rptr = rkey; } else { ntdb_access_release(ntdb, rkey); } return true; } ntdb->stats.compare_wrong_keycmp++; ntdb_access_release(ntdb, rkey); return ret; } /* Does entry match? */ static ntdb_bool_err match(struct ntdb_context *ntdb, uint32_t hash, const NTDB_DATA *key, ntdb_off_t val, struct ntdb_used_record *rec, const char **rptr) { ntdb_off_t off; enum NTDB_ERROR ecode; ntdb->stats.compares++; /* Top bits of offset == next bits of hash. */ if (bits_from(hash, ntdb->hash_bits, NTDB_OFF_UPPER_STEAL) != bits_from(val, 64-NTDB_OFF_UPPER_STEAL, NTDB_OFF_UPPER_STEAL)) { ntdb->stats.compare_wrong_offsetbits++; return false; } off = val & NTDB_OFF_MASK; ecode = ntdb_read_convert(ntdb, off, rec, sizeof(*rec)); if (ecode != NTDB_SUCCESS) { return (ntdb_bool_err)ecode; } return key_matches(ntdb, rec, off, key, rptr); } static bool is_chain(ntdb_off_t val) { return val & (1ULL << NTDB_OFF_CHAIN_BIT); } static ntdb_off_t hbucket_off(ntdb_off_t base, ntdb_len_t idx) { return base + sizeof(struct ntdb_used_record) + idx * sizeof(ntdb_off_t); } /* This is the core routine which searches the hashtable for an entry. * On error, no locks are held and -ve is returned. 
* Otherwise, hinfo is filled in. * If not found, the return value is 0. * If found, the return value is the offset, and *rec is the record. */ ntdb_off_t find_and_lock(struct ntdb_context *ntdb, NTDB_DATA key, int ltype, struct hash_info *h, struct ntdb_used_record *rec, const char **rptr) { ntdb_off_t off, val; const ntdb_off_t *arr = NULL; ntdb_len_t i; bool found_empty; enum NTDB_ERROR ecode; struct ntdb_used_record chdr; ntdb_bool_err berr; h->h = ntdb_hash(ntdb, key.dptr, key.dsize); h->table = NTDB_HASH_OFFSET; h->table_size = 1 << ntdb->hash_bits; h->bucket = bits_from(h->h, 0, ntdb->hash_bits); h->old_val = 0; ecode = ntdb_lock_hash(ntdb, h->bucket, ltype); if (ecode != NTDB_SUCCESS) { return NTDB_ERR_TO_OFF(ecode); } off = hbucket_off(h->table, h->bucket); val = ntdb_read_off(ntdb, off); if (NTDB_OFF_IS_ERR(val)) { ecode = NTDB_OFF_TO_ERR(val); goto fail; } /* Directly in hash table? */ if (!likely(is_chain(val))) { if (val) { berr = match(ntdb, h->h, &key, val, rec, rptr); if (berr < 0) { ecode = NTDB_OFF_TO_ERR(berr); goto fail; } if (berr) { return val & NTDB_OFF_MASK; } /* If you want to insert here, make a chain. */ h->old_val = val; } return 0; } /* Nope? Iterate through chain. 
*/ h->table = val & NTDB_OFF_MASK; ecode = ntdb_read_convert(ntdb, h->table, &chdr, sizeof(chdr)); if (ecode != NTDB_SUCCESS) { goto fail; } if (rec_magic(&chdr) != NTDB_CHAIN_MAGIC) { ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "find_and_lock:" " corrupt record %#x at %llu", rec_magic(&chdr), (long long)off); goto fail; } h->table_size = rec_data_length(&chdr) / sizeof(ntdb_off_t); arr = ntdb_access_read(ntdb, hbucket_off(h->table, 0), rec_data_length(&chdr), true); if (NTDB_PTR_IS_ERR(arr)) { ecode = NTDB_PTR_ERR(arr); goto fail; } found_empty = false; for (i = 0; i < h->table_size; i++) { if (arr[i] == 0) { if (!found_empty) { h->bucket = i; found_empty = true; } } else { berr = match(ntdb, h->h, &key, arr[i], rec, rptr); if (berr < 0) { ecode = NTDB_OFF_TO_ERR(berr); ntdb_access_release(ntdb, arr); goto fail; } if (berr) { /* We found it! */ h->bucket = i; off = arr[i] & NTDB_OFF_MASK; ntdb_access_release(ntdb, arr); return off; } } } if (!found_empty) { /* Set to any non-zero value */ h->old_val = 1; h->bucket = i; } ntdb_access_release(ntdb, arr); return 0; fail: ntdb_unlock_hash(ntdb, h->bucket, ltype); return NTDB_ERR_TO_OFF(ecode); } static ntdb_off_t encode_offset(const struct ntdb_context *ntdb, ntdb_off_t new_off, uint32_t hash) { ntdb_off_t extra; assert((new_off & (1ULL << NTDB_OFF_CHAIN_BIT)) == 0); assert((new_off >> (64 - NTDB_OFF_UPPER_STEAL)) == 0); /* We pack extra hash bits into the upper bits of the offset. */ extra = bits_from(hash, ntdb->hash_bits, NTDB_OFF_UPPER_STEAL); extra <<= (64 - NTDB_OFF_UPPER_STEAL); return new_off | extra; } /* Simply overwrite the hash entry we found before. 
*/ enum NTDB_ERROR replace_in_hash(struct ntdb_context *ntdb, const struct hash_info *h, ntdb_off_t new_off) { return ntdb_write_off(ntdb, hbucket_off(h->table, h->bucket), encode_offset(ntdb, new_off, h->h)); } enum NTDB_ERROR delete_from_hash(struct ntdb_context *ntdb, const struct hash_info *h) { return ntdb_write_off(ntdb, hbucket_off(h->table, h->bucket), 0); } enum NTDB_ERROR add_to_hash(struct ntdb_context *ntdb, const struct hash_info *h, ntdb_off_t new_off) { enum NTDB_ERROR ecode; ntdb_off_t chain; struct ntdb_used_record chdr; const ntdb_off_t *old; ntdb_off_t *new; /* We hit an empty bucket during search? That's where it goes. */ if (!h->old_val) { return replace_in_hash(ntdb, h, new_off); } /* Full at top-level? Create a 2-element chain. */ if (h->table == NTDB_HASH_OFFSET) { ntdb_off_t pair[2]; /* One element is old value, the other is the new value. */ pair[0] = h->old_val; pair[1] = encode_offset(ntdb, new_off, h->h); chain = alloc(ntdb, 0, sizeof(pair), NTDB_CHAIN_MAGIC, true); if (NTDB_OFF_IS_ERR(chain)) { return NTDB_OFF_TO_ERR(chain); } ecode = ntdb_write_convert(ntdb, chain + sizeof(struct ntdb_used_record), pair, sizeof(pair)); if (ecode == NTDB_SUCCESS) { ecode = ntdb_write_off(ntdb, hbucket_off(h->table, h->bucket), chain | (1ULL << NTDB_OFF_CHAIN_BIT)); } return ecode; } /* Full bucket. Expand. */ ecode = ntdb_read_convert(ntdb, h->table, &chdr, sizeof(chdr)); if (ecode != NTDB_SUCCESS) { return ecode; } if (rec_extra_padding(&chdr) >= sizeof(new_off)) { /* Expand in place. */ uint64_t dlen = rec_data_length(&chdr); ecode = set_header(ntdb, &chdr, NTDB_CHAIN_MAGIC, 0, dlen + sizeof(new_off), dlen + rec_extra_padding(&chdr)); if (ecode != NTDB_SUCCESS) { return ecode; } /* find_and_lock set up h to point to last bucket. 
*/ ecode = replace_in_hash(ntdb, h, new_off); if (ecode != NTDB_SUCCESS) { return ecode; } ecode = ntdb_write_convert(ntdb, h->table, &chdr, sizeof(chdr)); if (ecode != NTDB_SUCCESS) { return ecode; } /* For futureproofing, we always make the first byte of padding * a zero. */ if (rec_extra_padding(&chdr)) { ecode = ntdb->io->twrite(ntdb, h->table + sizeof(chdr) + dlen + sizeof(new_off), "", 1); } return ecode; } /* We need to reallocate the chain. */ chain = alloc(ntdb, 0, (h->table_size + 1) * sizeof(ntdb_off_t), NTDB_CHAIN_MAGIC, true); if (NTDB_OFF_IS_ERR(chain)) { return NTDB_OFF_TO_ERR(chain); } /* Map both and copy across old buckets. */ old = ntdb_access_read(ntdb, hbucket_off(h->table, 0), h->table_size*sizeof(ntdb_off_t), true); if (NTDB_PTR_IS_ERR(old)) { return NTDB_PTR_ERR(old); } new = ntdb_access_write(ntdb, hbucket_off(chain, 0), (h->table_size + 1)*sizeof(ntdb_off_t), true); if (NTDB_PTR_IS_ERR(new)) { ntdb_access_release(ntdb, old); return NTDB_PTR_ERR(new); } memcpy(new, old, h->bucket * sizeof(ntdb_off_t)); new[h->bucket] = encode_offset(ntdb, new_off, h->h); ntdb_access_release(ntdb, old); ecode = ntdb_access_commit(ntdb, new); if (ecode != NTDB_SUCCESS) { return ecode; } /* Free the old chain. */ ecode = add_free_record(ntdb, h->table, sizeof(struct ntdb_used_record) + rec_data_length(&chdr) + rec_extra_padding(&chdr), NTDB_LOCK_WAIT, true); /* Replace top-level to point to new chain */ return ntdb_write_off(ntdb, hbucket_off(NTDB_HASH_OFFSET, bits_from(h->h, 0, ntdb->hash_bits)), chain | (1ULL << NTDB_OFF_CHAIN_BIT)); } /* Traverse support: returns offset of record, or 0 or -ve error. */ static ntdb_off_t iterate_chain(struct ntdb_context *ntdb, ntdb_off_t val, struct hash_info *h) { ntdb_off_t i; enum NTDB_ERROR ecode; struct ntdb_used_record chdr; /* First load up chain header. 
*/ h->table = val & NTDB_OFF_MASK; ecode = ntdb_read_convert(ntdb, h->table, &chdr, sizeof(chdr)); if (ecode != NTDB_SUCCESS) { return ecode; } if (rec_magic(&chdr) != NTDB_CHAIN_MAGIC) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "get_table:" " corrupt record %#x at %llu", rec_magic(&chdr), (long long)h->table); } /* Chain length is implied by data length. */ h->table_size = rec_data_length(&chdr) / sizeof(ntdb_off_t); i = ntdb_find_nonzero_off(ntdb, hbucket_off(h->table, 0), h->bucket, h->table_size); if (NTDB_OFF_IS_ERR(i)) { return i; } if (i != h->table_size) { /* Return to next bucket. */ h->bucket = i + 1; val = ntdb_read_off(ntdb, hbucket_off(h->table, i)); if (NTDB_OFF_IS_ERR(val)) { return val; } return val & NTDB_OFF_MASK; } /* Go back up to hash table. */ h->table = NTDB_HASH_OFFSET; h->table_size = 1 << ntdb->hash_bits; h->bucket = bits_from(h->h, 0, ntdb->hash_bits) + 1; return 0; } /* Keeps hash locked unless returns 0 or error. */ static ntdb_off_t lock_and_iterate_hash(struct ntdb_context *ntdb, struct hash_info *h) { ntdb_off_t val, i; enum NTDB_ERROR ecode; if (h->table != NTDB_HASH_OFFSET) { /* We're in a chain. */ i = bits_from(h->h, 0, ntdb->hash_bits); ecode = ntdb_lock_hash(ntdb, i, F_RDLCK); if (ecode != NTDB_SUCCESS) { return NTDB_ERR_TO_OFF(ecode); } /* We dropped lock, bucket might have moved! */ val = ntdb_read_off(ntdb, hbucket_off(NTDB_HASH_OFFSET, i)); if (NTDB_OFF_IS_ERR(val)) { goto unlock; } /* We don't remove chains: there should still be one there! */ if (!val || !is_chain(val)) { ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "iterate_hash:" " vanished hchain %llu at %llu", (long long)val, (long long)i); val = NTDB_ERR_TO_OFF(ecode); goto unlock; } /* Find next bucket in the chain. */ val = iterate_chain(ntdb, val, h); if (NTDB_OFF_IS_ERR(val)) { goto unlock; } if (val != 0) { return val; } ntdb_unlock_hash(ntdb, i, F_RDLCK); /* OK, we've reset h back to top level. 
*/ } /* We do this unlocked, then re-check. */ for (i = ntdb_find_nonzero_off(ntdb, hbucket_off(h->table, 0), h->bucket, h->table_size); i != h->table_size; i = ntdb_find_nonzero_off(ntdb, hbucket_off(h->table, 0), i+1, h->table_size)) { ecode = ntdb_lock_hash(ntdb, i, F_RDLCK); if (ecode != NTDB_SUCCESS) { return NTDB_ERR_TO_OFF(ecode); } val = ntdb_read_off(ntdb, hbucket_off(h->table, i)); if (NTDB_OFF_IS_ERR(val)) { goto unlock; } /* Lost race, and it's empty? */ if (!val) { ntdb->stats.traverse_val_vanished++; ntdb_unlock_hash(ntdb, i, F_RDLCK); continue; } if (!is_chain(val)) { /* So caller knows what lock to free. */ h->h = i; /* Return to next bucket. */ h->bucket = i + 1; val &= NTDB_OFF_MASK; return val; } /* Start at beginning of chain */ h->bucket = 0; h->h = i; val = iterate_chain(ntdb, val, h); if (NTDB_OFF_IS_ERR(val)) { goto unlock; } if (val != 0) { return val; } /* Otherwise, bucket has been set to i+1 */ ntdb_unlock_hash(ntdb, i, F_RDLCK); } return 0; unlock: ntdb_unlock_hash(ntdb, i, F_RDLCK); return val; } /* Return success if we find something, NTDB_ERR_NOEXIST if none. */ enum NTDB_ERROR next_in_hash(struct ntdb_context *ntdb, struct hash_info *h, NTDB_DATA *kbuf, size_t *dlen) { ntdb_off_t off; struct ntdb_used_record rec; enum NTDB_ERROR ecode; off = lock_and_iterate_hash(ntdb, h); if (NTDB_OFF_IS_ERR(off)) { return NTDB_OFF_TO_ERR(off); } else if (off == 0) { return NTDB_ERR_NOEXIST; } /* The hash for this key is still locked. */ ecode = ntdb_read_convert(ntdb, off, &rec, sizeof(rec)); if (ecode != NTDB_SUCCESS) { goto unlock; } if (rec_magic(&rec) != NTDB_USED_MAGIC) { ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "next_in_hash:" " corrupt record at %llu", (long long)off); goto unlock; } kbuf->dsize = rec_key_length(&rec); /* They want data as well? 
*/ if (dlen) { *dlen = rec_data_length(&rec); kbuf->dptr = ntdb_alloc_read(ntdb, off + sizeof(rec), kbuf->dsize + *dlen); } else { kbuf->dptr = ntdb_alloc_read(ntdb, off + sizeof(rec), kbuf->dsize); } if (NTDB_PTR_IS_ERR(kbuf->dptr)) { ecode = NTDB_PTR_ERR(kbuf->dptr); goto unlock; } ecode = NTDB_SUCCESS; unlock: ntdb_unlock_hash(ntdb, bits_from(h->h, 0, ntdb->hash_bits), F_RDLCK); return ecode; } enum NTDB_ERROR first_in_hash(struct ntdb_context *ntdb, struct hash_info *h, NTDB_DATA *kbuf, size_t *dlen) { h->table = NTDB_HASH_OFFSET; h->table_size = 1 << ntdb->hash_bits; h->bucket = 0; return next_in_hash(ntdb, h, kbuf, dlen); } /* Even if the entry isn't in this hash bucket, you'd have to lock this * bucket to find it. */ static enum NTDB_ERROR chainlock(struct ntdb_context *ntdb, const NTDB_DATA *key, int ltype) { uint32_t h = ntdb_hash(ntdb, key->dptr, key->dsize); return ntdb_lock_hash(ntdb, bits_from(h, 0, ntdb->hash_bits), ltype); } /* lock/unlock one hash chain. This is meant to be used to reduce contention - it cannot guarantee how many records will be locked */ _PUBLIC_ enum NTDB_ERROR ntdb_chainlock(struct ntdb_context *ntdb, NTDB_DATA key) { return chainlock(ntdb, &key, F_WRLCK); } _PUBLIC_ void ntdb_chainunlock(struct ntdb_context *ntdb, NTDB_DATA key) { uint32_t h = ntdb_hash(ntdb, key.dptr, key.dsize); ntdb_unlock_hash(ntdb, bits_from(h, 0, ntdb->hash_bits), F_WRLCK); } _PUBLIC_ enum NTDB_ERROR ntdb_chainlock_read(struct ntdb_context *ntdb, NTDB_DATA key) { return chainlock(ntdb, &key, F_RDLCK); } _PUBLIC_ void ntdb_chainunlock_read(struct ntdb_context *ntdb, NTDB_DATA key) { uint32_t h = ntdb_hash(ntdb, key.dptr, key.dsize); ntdb_unlock_hash(ntdb, bits_from(h, 0, ntdb->hash_bits), F_RDLCK); } ntdb-1.0/io.c000066400000000000000000000437601224151530700130660ustar00rootroot00000000000000 /* Unix SMB/CIFS implementation. 
trivial database library Copyright (C) Andrew Tridgell 1999-2005 Copyright (C) Paul `Rusty' Russell 2000 Copyright (C) Jeremy Allison 2000-2003 Copyright (C) Rusty Russell 2010 ** NOTE! The following LGPL license applies to the ntdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "private.h" #include static void free_old_mmaps(struct ntdb_context *ntdb) { struct ntdb_old_mmap *i; assert(ntdb->file->direct_count == 0); while ((i = ntdb->file->old_mmaps) != NULL) { ntdb->file->old_mmaps = i->next; if (ntdb->flags & NTDB_INTERNAL) { ntdb->free_fn(i->map_ptr, ntdb->alloc_data); } else { munmap(i->map_ptr, i->map_size); } ntdb->free_fn(i, ntdb->alloc_data); } } static enum NTDB_ERROR save_old_map(struct ntdb_context *ntdb) { struct ntdb_old_mmap *old; assert(ntdb->file->direct_count); old = ntdb->alloc_fn(ntdb->file, sizeof(*old), ntdb->alloc_data); if (!old) { return ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "save_old_map alloc failed"); } old->next = ntdb->file->old_mmaps; old->map_ptr = ntdb->file->map_ptr; old->map_size = ntdb->file->map_size; ntdb->file->old_mmaps = old; return NTDB_SUCCESS; } enum NTDB_ERROR ntdb_munmap(struct ntdb_context *ntdb) { if (ntdb->file->fd == -1) { return NTDB_SUCCESS; } if (!ntdb->file->map_ptr) { return NTDB_SUCCESS; } /* We can't unmap now if there are accessors. 
*/ if (ntdb->file->direct_count) { return save_old_map(ntdb); } else { munmap(ntdb->file->map_ptr, ntdb->file->map_size); ntdb->file->map_ptr = NULL; } return NTDB_SUCCESS; } enum NTDB_ERROR ntdb_mmap(struct ntdb_context *ntdb) { int mmap_flags; if (ntdb->flags & NTDB_INTERNAL) return NTDB_SUCCESS; #ifndef HAVE_INCOHERENT_MMAP if (ntdb->flags & NTDB_NOMMAP) return NTDB_SUCCESS; #endif if ((ntdb->open_flags & O_ACCMODE) == O_RDONLY) mmap_flags = PROT_READ; else mmap_flags = PROT_READ | PROT_WRITE; /* size_t can be smaller than off_t. */ if ((size_t)ntdb->file->map_size == ntdb->file->map_size) { ntdb->file->map_ptr = mmap(NULL, ntdb->file->map_size, mmap_flags, MAP_SHARED, ntdb->file->fd, 0); } else ntdb->file->map_ptr = MAP_FAILED; /* * NB. When mmap fails it returns MAP_FAILED *NOT* NULL !!!! */ if (ntdb->file->map_ptr == MAP_FAILED) { ntdb->file->map_ptr = NULL; #ifdef HAVE_INCOHERENT_MMAP /* Incoherent mmap means everyone must mmap! */ return ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_mmap failed for size %lld (%s)", (long long)ntdb->file->map_size, strerror(errno)); #else ntdb_logerr(ntdb, NTDB_SUCCESS, NTDB_LOG_WARNING, "ntdb_mmap failed for size %lld (%s)", (long long)ntdb->file->map_size, strerror(errno)); #endif } return NTDB_SUCCESS; } /* check for an out of bounds access - if it is out of bounds then see if the database has been expanded by someone else and expand if necessary note that "len" is the minimum length needed for the db. If probe is true, len being too large isn't a failure. 
*/ static enum NTDB_ERROR ntdb_normal_oob(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_len_t len, bool probe) { struct stat st; enum NTDB_ERROR ecode; if (len + off < len) { if (probe) return NTDB_SUCCESS; return ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_oob off %llu len %llu wrap\n", (long long)off, (long long)len); } if (ntdb->flags & NTDB_INTERNAL) { if (probe) return NTDB_SUCCESS; ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_oob len %lld beyond internal" " alloc size %lld", (long long)(off + len), (long long)ntdb->file->map_size); return NTDB_ERR_IO; } ecode = ntdb_lock_expand(ntdb, F_RDLCK); if (ecode != NTDB_SUCCESS) { return ecode; } if (fstat(ntdb->file->fd, &st) != 0) { ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "Failed to fstat file: %s", strerror(errno)); ntdb_unlock_expand(ntdb, F_RDLCK); return NTDB_ERR_IO; } ntdb_unlock_expand(ntdb, F_RDLCK); if (st.st_size < off + len) { if (probe) return NTDB_SUCCESS; ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_oob len %llu beyond eof at %llu", (long long)(off + len), (long long)st.st_size); return NTDB_ERR_IO; } /* Unmap, update size, remap */ ecode = ntdb_munmap(ntdb); if (ecode) { return ecode; } ntdb->file->map_size = st.st_size; return ntdb_mmap(ntdb); } /* Endian conversion: we only ever deal with 8 byte quantities */ void *ntdb_convert(const struct ntdb_context *ntdb, void *buf, ntdb_len_t size) { assert(size % 8 == 0); if (unlikely((ntdb->flags & NTDB_CONVERT)) && buf) { uint64_t i, *p = (uint64_t *)buf; for (i = 0; i < size / 8; i++) p[i] = bswap_64(p[i]); } return buf; } /* Return first non-zero offset in offset array, or end, or -ve error. */ /* FIXME: Return the off? */ uint64_t ntdb_find_nonzero_off(struct ntdb_context *ntdb, ntdb_off_t base, uint64_t start, uint64_t end) { uint64_t i; const uint64_t *val; /* Zero vs non-zero is the same unconverted: minor optimization. 
*/ val = ntdb_access_read(ntdb, base + start * sizeof(ntdb_off_t), (end - start) * sizeof(ntdb_off_t), false); if (NTDB_PTR_IS_ERR(val)) { return NTDB_ERR_TO_OFF(NTDB_PTR_ERR(val)); } for (i = 0; i < (end - start); i++) { if (val[i]) break; } ntdb_access_release(ntdb, val); return start + i; } /* Return first zero offset in num offset array, or num, or -ve error. */ uint64_t ntdb_find_zero_off(struct ntdb_context *ntdb, ntdb_off_t off, uint64_t num) { uint64_t i; const uint64_t *val; /* Zero vs non-zero is the same unconverted: minor optimization. */ val = ntdb_access_read(ntdb, off, num * sizeof(ntdb_off_t), false); if (NTDB_PTR_IS_ERR(val)) { return NTDB_ERR_TO_OFF(NTDB_PTR_ERR(val)); } for (i = 0; i < num; i++) { if (!val[i]) break; } ntdb_access_release(ntdb, val); return i; } enum NTDB_ERROR zero_out(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_len_t len) { char buf[8192] = { 0 }; void *p = ntdb->io->direct(ntdb, off, len, true); enum NTDB_ERROR ecode = NTDB_SUCCESS; assert(!(ntdb->flags & NTDB_RDONLY)); if (NTDB_PTR_IS_ERR(p)) { return NTDB_PTR_ERR(p); } if (p) { memset(p, 0, len); return ecode; } while (len) { unsigned todo = len < sizeof(buf) ? len : sizeof(buf); ecode = ntdb->io->twrite(ntdb, off, buf, todo); if (ecode != NTDB_SUCCESS) { break; } len -= todo; off += todo; } return ecode; } /* write a lump of data at a specified offset */ static enum NTDB_ERROR ntdb_write(struct ntdb_context *ntdb, ntdb_off_t off, const void *buf, ntdb_len_t len) { enum NTDB_ERROR ecode; if (ntdb->flags & NTDB_RDONLY) { return ntdb_logerr(ntdb, NTDB_ERR_RDONLY, NTDB_LOG_USE_ERROR, "Write to read-only database"); } ecode = ntdb_oob(ntdb, off, len, false); if (ecode != NTDB_SUCCESS) { return ecode; } if (ntdb->file->map_ptr) { memcpy(off + (char *)ntdb->file->map_ptr, buf, len); } else { #ifdef HAVE_INCOHERENT_MMAP return NTDB_ERR_IO; #else ssize_t ret; ret = pwrite(ntdb->file->fd, buf, len, off); if (ret != len) { /* This shouldn't happen: we avoid sparse files. 
*/ if (ret >= 0) errno = ENOSPC; return ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_write: %zi at %zu len=%zu (%s)", ret, (size_t)off, (size_t)len, strerror(errno)); } #endif } return NTDB_SUCCESS; } /* read a lump of data at a specified offset */ static enum NTDB_ERROR ntdb_read(struct ntdb_context *ntdb, ntdb_off_t off, void *buf, ntdb_len_t len) { enum NTDB_ERROR ecode; ecode = ntdb_oob(ntdb, off, len, false); if (ecode != NTDB_SUCCESS) { return ecode; } if (ntdb->file->map_ptr) { memcpy(buf, off + (char *)ntdb->file->map_ptr, len); } else { #ifdef HAVE_INCOHERENT_MMAP return NTDB_ERR_IO; #else ssize_t r = pread(ntdb->file->fd, buf, len, off); if (r != len) { return ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_read failed with %zi at %zu " "len=%zu (%s) map_size=%zu", r, (size_t)off, (size_t)len, strerror(errno), (size_t)ntdb->file->map_size); } #endif } return NTDB_SUCCESS; } enum NTDB_ERROR ntdb_write_convert(struct ntdb_context *ntdb, ntdb_off_t off, const void *rec, size_t len) { enum NTDB_ERROR ecode; if (unlikely((ntdb->flags & NTDB_CONVERT))) { void *conv = ntdb->alloc_fn(ntdb, len, ntdb->alloc_data); if (!conv) { return ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "ntdb_write: no memory converting" " %zu bytes", len); } memcpy(conv, rec, len); ecode = ntdb->io->twrite(ntdb, off, ntdb_convert(ntdb, conv, len), len); ntdb->free_fn(conv, ntdb->alloc_data); } else { ecode = ntdb->io->twrite(ntdb, off, rec, len); } return ecode; } enum NTDB_ERROR ntdb_read_convert(struct ntdb_context *ntdb, ntdb_off_t off, void *rec, size_t len) { enum NTDB_ERROR ecode = ntdb->io->tread(ntdb, off, rec, len); ntdb_convert(ntdb, rec, len); return ecode; } static void *_ntdb_alloc_read(struct ntdb_context *ntdb, ntdb_off_t offset, ntdb_len_t len, unsigned int prefix) { unsigned char *buf; enum NTDB_ERROR ecode; /* some systems don't like zero length malloc */ buf = ntdb->alloc_fn(ntdb, prefix + len ? 
prefix + len : 1, ntdb->alloc_data); if (!buf) { ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "ntdb_alloc_read alloc failed len=%zu", (size_t)(prefix + len)); return NTDB_ERR_PTR(NTDB_ERR_OOM); } else { ecode = ntdb->io->tread(ntdb, offset, buf+prefix, len); if (unlikely(ecode != NTDB_SUCCESS)) { ntdb->free_fn(buf, ntdb->alloc_data); return NTDB_ERR_PTR(ecode); } } return buf; } /* read a lump of data, allocating the space for it */ void *ntdb_alloc_read(struct ntdb_context *ntdb, ntdb_off_t offset, ntdb_len_t len) { return _ntdb_alloc_read(ntdb, offset, len, 0); } static enum NTDB_ERROR fill(struct ntdb_context *ntdb, const void *buf, size_t size, ntdb_off_t off, ntdb_len_t len) { while (len) { size_t n = len > size ? size : len; ssize_t ret = pwrite(ntdb->file->fd, buf, n, off); if (ret != n) { if (ret >= 0) errno = ENOSPC; return ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "fill failed:" " %zi at %zu len=%zu (%s)", ret, (size_t)off, (size_t)len, strerror(errno)); } len -= n; off += n; } return NTDB_SUCCESS; } /* expand a file. we prefer to use ftruncate, as that is what posix says to use for mmap expansion */ static enum NTDB_ERROR ntdb_expand_file(struct ntdb_context *ntdb, ntdb_len_t addition) { char buf[8192]; enum NTDB_ERROR ecode; assert((ntdb->file->map_size + addition) % NTDB_PGSIZE == 0); if (ntdb->flags & NTDB_RDONLY) { return ntdb_logerr(ntdb, NTDB_ERR_RDONLY, NTDB_LOG_USE_ERROR, "Expand on read-only database"); } if (ntdb->flags & NTDB_INTERNAL) { char *new; /* Can't free it if we have direct accesses. 
*/ if (ntdb->file->direct_count) { ecode = save_old_map(ntdb); if (ecode) { return ecode; } new = ntdb->alloc_fn(ntdb->file, ntdb->file->map_size + addition, ntdb->alloc_data); if (new) { memcpy(new, ntdb->file->map_ptr, ntdb->file->map_size); } } else { new = ntdb->expand_fn(ntdb->file->map_ptr, ntdb->file->map_size + addition, ntdb->alloc_data); } if (!new) { return ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "No memory to expand database"); } ntdb->file->map_ptr = new; ntdb->file->map_size += addition; return NTDB_SUCCESS; } else { /* Unmap before trying to write; old NTDB claimed OpenBSD had * problem with this otherwise. */ ecode = ntdb_munmap(ntdb); if (ecode) { return ecode; } /* If this fails, we try to fill anyway. */ if (ftruncate(ntdb->file->fd, ntdb->file->map_size + addition)) ; /* now fill the file with something. This ensures that the file isn't sparse, which would be very bad if we ran out of disk. This must be done with write, not via mmap */ memset(buf, 0x43, sizeof(buf)); ecode = fill(ntdb, buf, sizeof(buf), ntdb->file->map_size, addition); if (ecode != NTDB_SUCCESS) return ecode; ntdb->file->map_size += addition; return ntdb_mmap(ntdb); } } const void *ntdb_access_read(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_len_t len, bool convert) { void *ret = NULL; if (likely(!(ntdb->flags & NTDB_CONVERT))) { ret = ntdb->io->direct(ntdb, off, len, false); if (NTDB_PTR_IS_ERR(ret)) { return ret; } } if (!ret) { struct ntdb_access_hdr *hdr; hdr = _ntdb_alloc_read(ntdb, off, len, sizeof(*hdr)); if (NTDB_PTR_IS_ERR(hdr)) { return hdr; } hdr->next = ntdb->access; ntdb->access = hdr; ret = hdr + 1; if (convert) { ntdb_convert(ntdb, (void *)ret, len); } } else { ntdb->file->direct_count++; } return ret; } void *ntdb_access_write(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_len_t len, bool convert) { void *ret = NULL; if (ntdb->flags & NTDB_RDONLY) { ntdb_logerr(ntdb, NTDB_ERR_RDONLY, NTDB_LOG_USE_ERROR, "Write to read-only database"); return 
NTDB_ERR_PTR(NTDB_ERR_RDONLY); } if (likely(!(ntdb->flags & NTDB_CONVERT))) { ret = ntdb->io->direct(ntdb, off, len, true); if (NTDB_PTR_IS_ERR(ret)) { return ret; } } if (!ret) { struct ntdb_access_hdr *hdr; hdr = _ntdb_alloc_read(ntdb, off, len, sizeof(*hdr)); if (NTDB_PTR_IS_ERR(hdr)) { return hdr; } hdr->next = ntdb->access; ntdb->access = hdr; hdr->off = off; hdr->len = len; hdr->convert = convert; ret = hdr + 1; if (convert) ntdb_convert(ntdb, (void *)ret, len); } else { ntdb->file->direct_count++; } return ret; } static struct ntdb_access_hdr **find_hdr(struct ntdb_context *ntdb, const void *p) { struct ntdb_access_hdr **hp; for (hp = &ntdb->access; *hp; hp = &(*hp)->next) { if (*hp + 1 == p) return hp; } return NULL; } void ntdb_access_release(struct ntdb_context *ntdb, const void *p) { struct ntdb_access_hdr *hdr, **hp = find_hdr(ntdb, p); if (hp) { hdr = *hp; *hp = hdr->next; ntdb->free_fn(hdr, ntdb->alloc_data); } else { if (--ntdb->file->direct_count == 0) { free_old_mmaps(ntdb); } } } enum NTDB_ERROR ntdb_access_commit(struct ntdb_context *ntdb, void *p) { struct ntdb_access_hdr *hdr, **hp = find_hdr(ntdb, p); enum NTDB_ERROR ecode; if (hp) { hdr = *hp; if (hdr->convert) ecode = ntdb_write_convert(ntdb, hdr->off, p, hdr->len); else ecode = ntdb_write(ntdb, hdr->off, p, hdr->len); *hp = hdr->next; ntdb->free_fn(hdr, ntdb->alloc_data); } else { if (--ntdb->file->direct_count == 0) { free_old_mmaps(ntdb); } ecode = NTDB_SUCCESS; } return ecode; } static void *ntdb_direct(struct ntdb_context *ntdb, ntdb_off_t off, size_t len, bool write_mode) { enum NTDB_ERROR ecode; if (unlikely(!ntdb->file->map_ptr)) return NULL; ecode = ntdb_oob(ntdb, off, len, false); if (unlikely(ecode != NTDB_SUCCESS)) return NTDB_ERR_PTR(ecode); return (char *)ntdb->file->map_ptr + off; } static ntdb_off_t ntdb_read_normal_off(struct ntdb_context *ntdb, ntdb_off_t off) { ntdb_off_t ret; enum NTDB_ERROR ecode; ntdb_off_t *p; p = ntdb_direct(ntdb, off, sizeof(*p), false); if 
(NTDB_PTR_IS_ERR(p)) { return NTDB_ERR_TO_OFF(NTDB_PTR_ERR(p)); } if (likely(p)) { return *p; } ecode = ntdb_read(ntdb, off, &ret, sizeof(ret)); if (ecode != NTDB_SUCCESS) { return NTDB_ERR_TO_OFF(ecode); } return ret; } static ntdb_off_t ntdb_read_convert_off(struct ntdb_context *ntdb, ntdb_off_t off) { ntdb_off_t ret; enum NTDB_ERROR ecode; ecode = ntdb_read_convert(ntdb, off, &ret, sizeof(ret)); if (ecode != NTDB_SUCCESS) { return NTDB_ERR_TO_OFF(ecode); } return ret; } static enum NTDB_ERROR ntdb_write_normal_off(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_off_t val) { ntdb_off_t *p; p = ntdb_direct(ntdb, off, sizeof(*p), true); if (NTDB_PTR_IS_ERR(p)) { return NTDB_PTR_ERR(p); } if (likely(p)) { *p = val; return NTDB_SUCCESS; } return ntdb_write(ntdb, off, &val, sizeof(val)); } static enum NTDB_ERROR ntdb_write_convert_off(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_off_t val) { return ntdb_write_convert(ntdb, off, &val, sizeof(val)); } void ntdb_inc_seqnum(struct ntdb_context *ntdb) { ntdb_off_t seq; if (likely(!(ntdb->flags & NTDB_CONVERT))) { int64_t *direct; direct = ntdb->io->direct(ntdb, offsetof(struct ntdb_header, seqnum), sizeof(*direct), true); if (likely(direct)) { /* Don't let it go negative, even briefly */ if (unlikely((*direct) + 1) < 0) *direct = 0; (*direct)++; return; } } seq = ntdb_read_off(ntdb, offsetof(struct ntdb_header, seqnum)); if (!NTDB_OFF_IS_ERR(seq)) { seq++; if (unlikely((int64_t)seq < 0)) seq = 0; ntdb_write_off(ntdb, offsetof(struct ntdb_header, seqnum), seq); } } static const struct ntdb_methods io_methods = { ntdb_read, ntdb_write, ntdb_normal_oob, ntdb_expand_file, ntdb_direct, ntdb_read_normal_off, ntdb_write_normal_off, }; static const struct ntdb_methods io_convert_methods = { ntdb_read, ntdb_write, ntdb_normal_oob, ntdb_expand_file, ntdb_direct, ntdb_read_convert_off, ntdb_write_convert_off, }; /* initialise the default methods table */ void ntdb_io_init(struct ntdb_context *ntdb) { if (ntdb->flags & 
NTDB_CONVERT) ntdb->io = &io_convert_methods; else ntdb->io = &io_methods; } ntdb-1.0/lib/000077500000000000000000000000001224151530700130475ustar00rootroot00000000000000ntdb-1.0/lib/ccan/000077500000000000000000000000001224151530700137535ustar00rootroot00000000000000ntdb-1.0/lib/ccan/README000066400000000000000000000004501224151530700146320ustar00rootroot00000000000000These files are from the CCAN project (http://ccan.ozlabs.org); the _info and LICENSE files in each directory indicate their (separate) licenses. I'm trying to keep them in sync with the upstream versions as much as possible, so please be careful! Thanks! Rusty Russell ntdb-1.0/lib/ccan/array_size/000077500000000000000000000000001224151530700161235ustar00rootroot00000000000000ntdb-1.0/lib/ccan/array_size/_info000066400000000000000000000020001224151530700171300ustar00rootroot00000000000000#include #include #include "config.h" /** * array_size - routine for safely deriving the size of a visible array. * * This provides a simple ARRAY_SIZE() macro, which (given a good compiler) * will also break compile if you try to use it on a pointer. * * This can ensure your code is robust to changes, without needing a gratuitous * macro or constant. * * Example: * // Outputs "Initialized 32 values" * #include * #include * #include * * // We currently use 32 random values. 
* static unsigned int vals[32]; * * int main(void) * { * unsigned int i; * for (i = 0; i < ARRAY_SIZE(vals); i++) * vals[i] = random(); * printf("Initialized %u values\n", i); * return 0; * } * * License: Public domain * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/build_assert\n"); return 0; } return 1; } ntdb-1.0/lib/ccan/array_size/array_size.h000066400000000000000000000015001224151530700204400ustar00rootroot00000000000000#ifndef CCAN_ARRAY_SIZE_H #define CCAN_ARRAY_SIZE_H #include "config.h" #include /** * ARRAY_SIZE - get the number of elements in a visible array * @arr: the array whose size you want. * * This does not work on pointers, or arrays declared as [], or * function parameters. With correct compiler support, such usage * will cause a build error (see build_assert). */ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + _array_size_chk(arr)) #if HAVE_BUILTIN_TYPES_COMPATIBLE_P && HAVE_TYPEOF /* Two gcc extensions. * &a[0] degrades to a pointer: a different type from an array */ #define _array_size_chk(arr) \ BUILD_ASSERT_OR_ZERO(!__builtin_types_compatible_p(typeof(arr), \ typeof(&(arr)[0]))) #else #define _array_size_chk(arr) 0 #endif #endif /* CCAN_ALIGNOF_H */ ntdb-1.0/lib/ccan/array_size/test/000077500000000000000000000000001224151530700171025ustar00rootroot00000000000000ntdb-1.0/lib/ccan/array_size/test/compile_fail-function-param.c000066400000000000000000000007651224151530700246220ustar00rootroot00000000000000#include #include struct foo { unsigned int a, b; }; int check_parameter(const struct foo array[4]); int check_parameter(const struct foo array[4]) { #ifdef FAIL return (ARRAY_SIZE(array) == 4); #if !HAVE_TYPEOF || !HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if _array_size_chk is a noop." 
#endif #else return sizeof(array) == 4 * sizeof(struct foo); #endif } int main(int argc, char *argv[]) { return check_parameter(NULL); } ntdb-1.0/lib/ccan/array_size/test/compile_fail.c000066400000000000000000000005021224151530700216660ustar00rootroot00000000000000#include int main(int argc, char *argv[8]) { char array[100]; #ifdef FAIL return ARRAY_SIZE(argv) + ARRAY_SIZE(array); #if !HAVE_TYPEOF || !HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if _array_size_chk is a noop." #endif #else return ARRAY_SIZE(array); #endif } ntdb-1.0/lib/ccan/array_size/test/run.c000066400000000000000000000013711224151530700200540ustar00rootroot00000000000000#include #include static char array1[1]; static int array2[2]; static unsigned long array3[3][5]; struct foo { unsigned int a, b; char string[100]; }; static struct foo array4[4]; /* Make sure they can be used in initializers. */ static int array1_size = ARRAY_SIZE(array1); static int array2_size = ARRAY_SIZE(array2); static int array3_size = ARRAY_SIZE(array3); static int array4_size = ARRAY_SIZE(array4); int main(int argc, char *argv[]) { plan_tests(8); ok1(array1_size == 1); ok1(array2_size == 2); ok1(array3_size == 3); ok1(array4_size == 4); ok1(ARRAY_SIZE(array1) == 1); ok1(ARRAY_SIZE(array2) == 2); ok1(ARRAY_SIZE(array3) == 3); ok1(ARRAY_SIZE(array4) == 4); return exit_status(); } ntdb-1.0/lib/ccan/asearch/000077500000000000000000000000001224151530700153615ustar00rootroot00000000000000ntdb-1.0/lib/ccan/asearch/LICENSE000066400000000000000000000636351224151530700164030ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. 
It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. 
We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. 
These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". 
A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. 
You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. 
Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. 
A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. 
As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. 
c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. 
b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. 
If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. 
The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! ntdb-1.0/lib/ccan/asearch/_info000066400000000000000000000022511224151530700163760ustar00rootroot00000000000000#include #include #include "config.h" /** * asearch - typesafe binary search (bsearch) * * An ordered array of objects can be efficiently searched using a binary * search algorithm; the time taken is around log(number of elements). * * This version uses macros to be typesafe on platforms which support it. 
* * License: LGPL * Author: Rusty Russell * * Example: * #include * #include * #include * * static int cmp(const char *key, char *const *elem) * { * return strcmp(key, *elem); * } * * int main(int argc, char *argv[]) * { * char **p; * * if (argc < 2) { * fprintf(stderr, "Usage: %s ...\n" * "Print position of key in (sorted) list\n", * argv[0]); * exit(1); * } * * p = asearch(argv[1], &argv[2], argc-2, cmp); * if (!p) { * printf("Not found!\n"); * return 1; * } * printf("%u\n", (int)(p - &argv[2])); * return 0; * } */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/typesafe_cb\n"); printf("ccan/array_size\n"); return 0; } return 1; } ntdb-1.0/lib/ccan/asearch/asearch.h000066400000000000000000000025211224151530700171400ustar00rootroot00000000000000/* Licensed under LGPLv2.1+ - see LICENSE file for details */ #ifndef CCAN_ASEARCH_H #define CCAN_ASEARCH_H #include #include /** * asearch - search an array of elements * @key: pointer to item being searched for * @base: pointer to data to sort * @num: number of elements * @cmp: pointer to comparison function * * This function does a binary search on the given array. The * contents of the array should already be in ascending sorted order * under the provided comparison function. * * Note that the key need not have the same type as the elements in * the array, e.g. key could be a string and the comparison function * could compare the string with the struct's name field. However, if * the key and elements in the array are of the same type, you can use * the same comparison function for both sort() and asearch(). 
*/ #if HAVE_TYPEOF #define asearch(key, base, num, cmp) \ ((__typeof__(*(base))*)(bsearch((key), (base), (num), sizeof(*(base)), \ typesafe_cb_cast(int (*)(const void *, const void *), \ int (*)(const __typeof__(*(key)) *, \ const __typeof__(*(base)) *), \ (cmp))))) #else #define asearch(key, base, num, cmp) \ (bsearch((key), (base), (num), sizeof(*(base)), \ (int (*)(const void *, const void *))(cmp))) #endif #endif /* CCAN_ASEARCH_H */ ntdb-1.0/lib/ccan/asearch/test/000077500000000000000000000000001224151530700163405ustar00rootroot00000000000000ntdb-1.0/lib/ccan/asearch/test/compile_fail-return-value-const.c000066400000000000000000000007541224151530700247100ustar00rootroot00000000000000#include #include #include static int cmp(const char *key, const char *const *elem) { return strcmp(key, *elem); } int main(void) { const char key[] = "key"; const char *elems[] = { "a", "big", "list", "of", "things" }; #ifdef FAIL char **p; #if !HAVE_TYPEOF #error "Unfortunately we don't fail if no typeof." #endif #else const char **p; #endif p = asearch(key, elems, ARRAY_SIZE(elems), cmp); return p ? 0 : 1; } ntdb-1.0/lib/ccan/asearch/test/compile_fail-return-value.c000066400000000000000000000005321224151530700235560ustar00rootroot00000000000000#include static int cmp(const char *key, char *const *elem) { return 0; } int main(int argc, char **argv) { const char key[] = "key"; #ifdef FAIL int **p; #if !HAVE_TYPEOF #error "Unfortunately we don't fail if no typeof." #endif #else char **p; #endif p = asearch(key, argv+1, argc-1, cmp); return p ? 
0 : 1; } ntdb-1.0/lib/ccan/asearch/test/run-strings.c000066400000000000000000000006411224151530700210000ustar00rootroot00000000000000#include #include #include #include static int cmp(const int *key, const char *const *elem) { return *key - atoi(*elem); } int main(void) { const char *args[] = { "1", "4", "7", "9" }; int key = 7; const char **p; plan_tests(1); p = asearch(&key, args, ARRAY_SIZE(args), cmp); ok1(p == &args[2]); return exit_status(); } ntdb-1.0/lib/ccan/asearch/test/run.c000066400000000000000000000015331224151530700173120ustar00rootroot00000000000000#include #include #include #include static int test_cmp(const int *key, const int *elt) { if (*key < *elt) return -1; else if (*key > *elt) return 1; return 0; } int main(void) { const int arr[] = { INT_MIN, 0, 1, 2, 3, 4, 5, 6, INT_MAX }; unsigned int start, num, i, total = 0; int key; plan_tests(285); for (start = 0; start < ARRAY_SIZE(arr); start++) { for (num = 0; num < ARRAY_SIZE(arr) - start; num++) { key = 7; ok1(asearch(&key, &arr[start], num, test_cmp) == NULL); total++; for (i = start; i < start+num; i++) { const int *ret; key = arr[i]; ret = asearch(&key, &arr[start], num, test_cmp); ok1(ret); ok1(ret && *ret == key); total++; } } } diag("Tested %u searches\n", total); return exit_status(); } ntdb-1.0/lib/ccan/build_assert/000077500000000000000000000000001224151530700164335ustar00rootroot00000000000000ntdb-1.0/lib/ccan/build_assert/LICENSE000066400000000000000000000636351224151530700174550ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] 
Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. 
We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. 
These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". 
A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. 
You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. 
Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. 
A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. 
As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. 
c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. 
b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. 
If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. 
The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! ntdb-1.0/lib/ccan/build_assert/_info000066400000000000000000000025001224151530700174450ustar00rootroot00000000000000#include #include #include "config.h" /** * build_assert - routines for build-time assertions * * This code provides routines which will cause compilation to fail should some * assertion be untrue: such failures are preferable to run-time assertions, * but much more limited since they can only depends on compile-time constants. * * These assertions are most useful when two parts of the code must be kept in * sync: it is better to avoid such cases if possible, but seconds best is to * detect invalid changes at build time. * * For example, a tricky piece of code might rely on a certain element being at * the start of the structure. To ensure that future changes don't break it, * you would catch such changes in your code like so: * * Example: * #include * #include * * struct foo { * char string[5]; * int x; * }; * * static char *foo_string(struct foo *foo) * { * // This trick requires that the string be first in the structure * BUILD_ASSERT(offsetof(struct foo, string) == 0); * return (char *)foo; * } * * License: Public domain * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) /* Nothing. 
*/ return 0; return 1; } ntdb-1.0/lib/ccan/build_assert/build_assert.h000066400000000000000000000022231224151530700212630ustar00rootroot00000000000000#ifndef CCAN_BUILD_ASSERT_H #define CCAN_BUILD_ASSERT_H /** * BUILD_ASSERT - assert a build-time dependency. * @cond: the compile-time condition which must be true. * * Your compile will fail if the condition isn't true, or can't be evaluated * by the compiler. This can only be used within a function. * * Example: * #include * ... * static char *foo_to_char(struct foo *foo) * { * // This code needs string to be at start of foo. * BUILD_ASSERT(offsetof(struct foo, string) == 0); * return (char *)foo; * } */ #define BUILD_ASSERT(cond) \ do { (void) sizeof(char [1 - 2*!(cond)]); } while(0) /** * BUILD_ASSERT_OR_ZERO - assert a build-time dependency, as an expression. * @cond: the compile-time condition which must be true. * * Your compile will fail if the condition isn't true, or can't be evaluated * by the compiler. This can be used in an expression: its value is "0". 
* * Example: * #define foo_to_char(foo) \ * ((char *)(foo) \ * + BUILD_ASSERT_OR_ZERO(offsetof(struct foo, string) == 0)) */ #define BUILD_ASSERT_OR_ZERO(cond) \ (sizeof(char [1 - 2*!(cond)]) - 1) #endif /* CCAN_BUILD_ASSERT_H */ ntdb-1.0/lib/ccan/build_assert/test/000077500000000000000000000000001224151530700174125ustar00rootroot00000000000000ntdb-1.0/lib/ccan/build_assert/test/compile_fail-expr.c000066400000000000000000000002341224151530700231540ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { #ifdef FAIL return BUILD_ASSERT_OR_ZERO(1 == 0); #else return 0; #endif } ntdb-1.0/lib/ccan/build_assert/test/compile_fail.c000066400000000000000000000002071224151530700222000ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { #ifdef FAIL BUILD_ASSERT(1 == 0); #endif return 0; } ntdb-1.0/lib/ccan/build_assert/test/compile_ok.c000066400000000000000000000001641224151530700217000ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { BUILD_ASSERT(1 == 1); return 0; } ntdb-1.0/lib/ccan/build_assert/test/run-BUILD_ASSERT_OR_ZERO.c000066400000000000000000000002741224151530700235420ustar00rootroot00000000000000#include #include int main(int argc, char *argv[]) { plan_tests(1); ok1(BUILD_ASSERT_OR_ZERO(1 == 1) == 0); return exit_status(); } ntdb-1.0/lib/ccan/build_assert/test/run-EXPR_BUILD_ASSERT.c000066400000000000000000000002711224151530700231360ustar00rootroot00000000000000#include #include int main(int argc, char *argv[]) { plan_tests(1); ok1(EXPR_BUILD_ASSERT(1 == 1) == 0); return exit_status(); } ntdb-1.0/lib/ccan/cast/000077500000000000000000000000001224151530700147055ustar00rootroot00000000000000ntdb-1.0/lib/ccan/cast/LICENSE000066400000000000000000000636371224151530700157310ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. 
If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. 
We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. 
This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. 
You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. 
If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. 
You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. 
If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. 
(It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. 
You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. 
You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. 
If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! ntdb-1.0/lib/ccan/cast/_info000066400000000000000000000040701224151530700157230ustar00rootroot00000000000000#include #include "config.h" /** * cast - routines for safer casting. * * Often you want to cast in a limited way, such as removing a const or * switching between integer types. However, normal casts will work on * almost any type, making them dangerous when the code changes. * * These C++-inspired macros serve two purposes: they make it clear the * exact reason for the cast, and they also (with some compilers) cause * errors when misused. 
* * Based on Jan Engelhardt's libHX macros: http://libhx.sourceforge.net/ * * Author: Jan Engelhardt * Maintainer: Rusty Russell * License: LGPL (v2.1 or any later version) * * Example: * // Given "test" contains "3 t's in 'test string' * #include * #include * #include * * // Find char @orig in @str, if @repl, replace them. Return number. * static size_t find_chars(char *str, char orig, char repl) * { * size_t i, count = 0; * for (i = 0; str[i]; i++) { * if (str[i] == orig) { * count++; * if (repl) * str[i] = repl; * } * } * return count; * } * * // Terrible hash function. * static uint64_t hash_string(const unsigned char *str) * { * size_t i; * uint64_t hash = 0; * for (i = 0; str[i]; i++) * hash += str[i]; * return hash; * } * * int main(int argc, char *argv[]) * { * uint64_t hash; * * // find_chars wants a non-const string, but doesn't * // need it if repl == 0. * printf("%zu %c's in 'test string'\n", * find_chars(cast_const(char *, "test string"), * argv[1][0], 0), * argv[1][0]); * * // hash_string wants an unsigned char. * hash = hash_string(cast_signed(unsigned char *, argv[1])); * * // Need a long long to hand to printf. * printf("Hash of '%s' = %llu\n", argv[1], * cast_static(unsigned long long, hash)); * return 0; * } * */ int main(int argc, char *argv[]) { /* Expect exactly one argument */ if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/build_assert\n"); return 0; } return 1; } ntdb-1.0/lib/ccan/cast/cast.h000066400000000000000000000115571224151530700160210ustar00rootroot00000000000000/* Licensed under LGPLv2.1+ - see LICENSE file for details */ #ifndef CCAN_CAST_H #define CCAN_CAST_H #include "config.h" #include #include /** * cast_signed - cast a (const) char * to/from (const) signed/unsigned char *. * @type: some char * variant. * @expr: expression (of some char * variant) to cast. 
* * Some libraries insist on an unsigned char in various places; cast_signed * makes sure (with suitable compiler) that the expression you are casting * only differs in signed/unsigned, not in type or const-ness. */ #define cast_signed(type, expr) \ (0 ? BUILD_ASSERT_OR_ZERO(cast_sign_compatible(type, (expr))) : \ (type)(expr)) /** * cast_const - remove a const qualifier from a pointer. * @type: some pointer type. * @expr: expression to cast. * * This ensures that you are only removing the const qualifier from an * expression. The expression must otherwise match @type. * * We cast via intptr_t to suppress gcc's -Wcast-qual (which SAMBA * uses), and via the ? : so Sun CC doesn't complain about the result * not being constant. * * If @type is a pointer to a pointer, you must use cast_const2 (etc). * * Example: * // Dumb open-coded strstr variant. * static char *find_needle(const char *haystack) * { * size_t i; * for (i = 0; i < strlen(haystack); i++) * if (memcmp("needle", haystack+i, strlen("needle")) == 0) * return cast_const(char *, haystack+i); * return NULL; * } */ #define cast_const(type, expr) \ (0 ? BUILD_ASSERT_OR_ZERO(cast_const_compat1((expr), type)) : \ (type)(intptr_t)(expr)) /** * cast_const2 - remove a const qualifier from a pointer to a pointer. * @type: some pointer to pointer type. * @expr: expression to cast. * * This ensures that you are only removing the const qualifier from an * expression. The expression must otherwise match @type. */ #define cast_const2(type, expr) \ (0 ? BUILD_ASSERT_OR_ZERO(cast_const_compat2((expr), type)) : \ (type)(intptr_t)(expr)) /** * cast_const3 - remove a const from a pointer to a pointer to a pointer.. * @type: some pointer to pointer to pointer type. * @expr: expression to cast. * * This ensures that you are only removing the const qualifier from an * expression. The expression must otherwise match @type. */ #define cast_const3(type, expr) \ (0 ? 
BUILD_ASSERT_OR_ZERO(cast_const_compat3((expr), type)) : \ (type)(intptr_t)(expr)) /** * cast_static - explicit mimic of implicit cast. * @type: some type. * @expr: expression to cast. * * This ensures that the cast is not to or from a pointer: it can only be * an implicit cast, such as a pointer to a similar const pointer, or between * integral types. */ #if HAVE_COMPOUND_LITERALS #define cast_static(type, expr) \ ((struct { type x; }){(expr)}.x) #else #define cast_static(type, expr) \ ((type)(expr)) #endif /* Herein lies the gcc magic to evoke compile errors. */ #if HAVE_BUILTIN_CHOOSE_EXPR && HAVE_BUILTIN_TYPES_COMPATIBLE_P && HAVE_TYPEOF #define cast_sign_compatible(t, e) \ __builtin_choose_expr( \ __builtin_types_compatible_p(__typeof__(t), char *) || \ __builtin_types_compatible_p(__typeof__(t), signed char *) || \ __builtin_types_compatible_p(__typeof__(t), unsigned char *), \ /* if type is not const qualified */ \ __builtin_types_compatible_p(__typeof__(e), char *) || \ __builtin_types_compatible_p(__typeof__(e), signed char *) || \ __builtin_types_compatible_p(__typeof__(e), unsigned char *), \ /* and if it is... 
*/ \ __builtin_types_compatible_p(__typeof__(e), const char *) || \ __builtin_types_compatible_p(__typeof__(e), const signed char *) || \ __builtin_types_compatible_p(__typeof__(e), const unsigned char *) ||\ __builtin_types_compatible_p(__typeof__(e), char *) || \ __builtin_types_compatible_p(__typeof__(e), signed char *) || \ __builtin_types_compatible_p(__typeof__(e), unsigned char *) \ ) #define cast_const_strip1(expr) \ __typeof__(*(union { int z; __typeof__(expr) x; }){0}.x) #define cast_const_strip2(expr) \ __typeof__(**(union { int z; __typeof__(expr) x; }){0}.x) #define cast_const_strip3(expr) \ __typeof__(***(union { int z; __typeof__(expr) x; }){0}.x) #define cast_const_compat1(expr, type) \ __builtin_types_compatible_p(cast_const_strip1(expr), \ cast_const_strip1(type)) #define cast_const_compat2(expr, type) \ __builtin_types_compatible_p(cast_const_strip2(expr), \ cast_const_strip2(type)) #define cast_const_compat3(expr, type) \ __builtin_types_compatible_p(cast_const_strip3(expr), \ cast_const_strip3(type)) #else #define cast_sign_compatible(type, expr) \ (sizeof(*(type)0) == 1 && sizeof(*(expr)) == 1) #define cast_const_compat1(expr, type) (1) #define cast_const_compat2(expr, type) (1) #define cast_const_compat3(expr, type) (1) #endif #endif /* CCAN_CAST_H */ ntdb-1.0/lib/ccan/cast/test/000077500000000000000000000000001224151530700156645ustar00rootroot00000000000000ntdb-1.0/lib/ccan/cast/test/compile_fail-cast_const.c000066400000000000000000000010271224151530700226110ustar00rootroot00000000000000#include #include /* Note: this *isn't* sizeof(char) on all platforms. */ struct char_struct { char c; }; int main(int argc, char *argv[]) { char *uc; const #ifdef FAIL struct char_struct #else char #endif *p = NULL; uc = cast_const(char *, p); (void) uc; /* Suppress unused-but-set-variable warning. 
*/ return 0; } #ifdef FAIL #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if cast_const can only use size" #endif #endif ntdb-1.0/lib/ccan/cast/test/compile_fail-cast_const2.c000066400000000000000000000010331224151530700226700ustar00rootroot00000000000000#include #include /* Note: this *isn't* sizeof(char) on all platforms. */ struct char_struct { char c; }; int main(int argc, char *argv[]) { char **uc; const #ifdef FAIL struct char_struct #else char #endif **p = NULL; uc = cast_const2(char **, p); (void) uc; /* Suppress unused-but-set-variable warning. */ return 0; } #ifdef FAIL #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if cast_const can only use size" #endif #endif ntdb-1.0/lib/ccan/cast/test/compile_fail-cast_const3.c000066400000000000000000000010361224151530700226740ustar00rootroot00000000000000#include #include /* Note: this *isn't* sizeof(char) on all platforms. */ struct char_struct { char c; }; int main(int argc, char *argv[]) { char ***uc; const #ifdef FAIL struct char_struct #else char #endif ***p = NULL; uc = cast_const3(char ***, p); (void) uc; /* Suppress unused-but-set-variable warning. */ return 0; } #ifdef FAIL #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if cast_const can only use size" #endif #endif ntdb-1.0/lib/ccan/cast/test/compile_fail-cast_signed-const.c000066400000000000000000000006641224151530700240660ustar00rootroot00000000000000#include #include int main(int argc, char *argv[]) { unsigned char *uc; #ifdef FAIL const #endif char *p = NULL; uc = cast_signed(unsigned char *, p); (void) uc; /* Suppress unused-but-set-variable warning. 
*/ return 0; } #ifdef FAIL #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if cast_const can only use size" #endif #endif ntdb-1.0/lib/ccan/cast/test/compile_fail-cast_signed-sizesame.c000066400000000000000000000010431224151530700245500ustar00rootroot00000000000000#include #include /* Note: this *isn't* sizeof(char) on all platforms. */ struct char_struct { char c; }; int main(int argc, char *argv[]) { unsigned char *uc; #ifdef FAIL struct char_struct #else char #endif *p = NULL; uc = cast_signed(unsigned char *, p); (void) uc; /* Suppress unused-but-set-variable warning. */ return 0; } #ifdef FAIL #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if cast_signed can only use size" #endif #endif ntdb-1.0/lib/ccan/cast/test/compile_fail-cast_signed.c000066400000000000000000000004121224151530700227310ustar00rootroot00000000000000#include #include int main(int argc, char *argv[]) { unsigned char *uc; #ifdef FAIL int #else char #endif *p = NULL; uc = cast_signed(unsigned char *, p); (void) uc; /* Suppress unused-but-set-variable warning. */ return 0; } ntdb-1.0/lib/ccan/cast/test/compile_fail-cast_static-2.c000066400000000000000000000005501224151530700231110ustar00rootroot00000000000000#include #include int main(int argc, char *argv[]) { char *c; #ifdef FAIL long #else char #endif *p = 0; c = cast_static(char *, p); (void) c; /* Suppress unused-but-set-variable warning. */ return 0; } #ifdef FAIL #if !HAVE_COMPOUND_LITERALS #error "Unfortunately we don't fail if cast_static is a noop" #endif #endif ntdb-1.0/lib/ccan/cast/test/compile_fail-cast_static-3.c000066400000000000000000000005421224151530700231130ustar00rootroot00000000000000#include #include int main(int argc, char *argv[]) { char *c; #ifdef FAIL const #endif char *p = 0; c = cast_static(char *, p); (void) c; /* Suppress unused-but-set-variable warning. 
*/ return 0; } #ifdef FAIL #if !HAVE_COMPOUND_LITERALS #error "Unfortunately we don't fail if cast_static is a noop" #endif #endif ntdb-1.0/lib/ccan/cast/test/compile_fail-cast_static.c000066400000000000000000000005661224151530700227610ustar00rootroot00000000000000#include #include int main(int argc, char *argv[]) { long c; #ifdef FAIL char * #else char #endif x = 0; c = cast_static(long, x); (void) c; /* Suppress unused-but-set-variable warning. */ return 0; } #ifdef FAIL #if !HAVE_COMPOUND_LITERALS #error "Unfortunately we don't fail if cast_static without compound literals" #endif #endif ntdb-1.0/lib/ccan/cast/test/compile_ok-cast_void.c000066400000000000000000000002471224151530700221250ustar00rootroot00000000000000#include static void *remove_void(const void *p) { return cast_const(void *, p); } int main(void) { void *p = remove_void("foo"); return !p; } ntdb-1.0/lib/ccan/cast/test/compile_ok-static.c000066400000000000000000000003621224151530700214370ustar00rootroot00000000000000/* OpenIndiana's CC (aka suncc) has issues with constants: make sure * we are one! */ #include static char *p = cast_const(char *, (const char *)"hello"); int main(int argc, char *argv[]) { return p[0] == argv[0][0]; } ntdb-1.0/lib/ccan/check_type/000077500000000000000000000000001224151530700160715ustar00rootroot00000000000000ntdb-1.0/lib/ccan/check_type/_info000066400000000000000000000015031224151530700171050ustar00rootroot00000000000000#include #include #include "config.h" /** * check_type - routines for compile time type checking * * C has fairly weak typing: ints get automatically converted to longs, signed * to unsigned, etc. There are some cases where this is best avoided, and * these macros provide methods for evoking warnings (or build errors) when * a precise type isn't used. * * On compilers which don't support typeof() these routines are less effective, * since they have to use sizeof() which can only distiguish between types of * different size. 
* * License: Public domain * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { #if !HAVE_TYPEOF printf("ccan/build_assert\n"); #endif return 0; } return 1; } ntdb-1.0/lib/ccan/check_type/check_type.h000066400000000000000000000044141224151530700203630ustar00rootroot00000000000000#ifndef CCAN_CHECK_TYPE_H #define CCAN_CHECK_TYPE_H #include "config.h" /** * check_type - issue a warning or build failure if type is not correct. * @expr: the expression whose type we should check (not evaluated). * @type: the exact type we expect the expression to be. * * This macro is usually used within other macros to try to ensure that a macro * argument is of the expected type. No type promotion of the expression is * done: an unsigned int is not the same as an int! * * check_type() always evaluates to 0. * * If your compiler does not support typeof, then the best we can do is fail * to compile if the sizes of the types are unequal (a less complete check). * * Example: * // They should always pass a 64-bit value to _set_some_value! * #define set_some_value(expr) \ * _set_some_value((check_type((expr), uint64_t), (expr))) */ /** * check_types_match - issue a warning or build failure if types are not same. * @expr1: the first expression (not evaluated). * @expr2: the second expression (not evaluated). * * This macro is usually used within other macros to try to ensure that * arguments are of identical types. No type promotion of the expressions is * done: an unsigned int is not the same as an int! * * check_types_match() always evaluates to 0. * * If your compiler does not support typeof, then the best we can do is fail * to compile if the sizes of the types are unequal (a less complete check). * * Example: * // Do subtraction to get to enclosing type, but make sure that * // pointer is of correct type for that member. 
* #define container_of(mbr_ptr, encl_type, mbr) \ * (check_types_match((mbr_ptr), &((encl_type *)0)->mbr), \ * ((encl_type *) \ * ((char *)(mbr_ptr) - offsetof(enclosing_type, mbr)))) */ #if HAVE_TYPEOF #define check_type(expr, type) \ ((typeof(expr) *)0 != (type *)0) #define check_types_match(expr1, expr2) \ ((typeof(expr1) *)0 != (typeof(expr2) *)0) #else #include /* Without typeof, we can only test the sizes. */ #define check_type(expr, type) \ BUILD_ASSERT_OR_ZERO(sizeof(expr) == sizeof(type)) #define check_types_match(expr1, expr2) \ BUILD_ASSERT_OR_ZERO(sizeof(expr1) == sizeof(expr2)) #endif /* HAVE_TYPEOF */ #endif /* CCAN_CHECK_TYPE_H */ ntdb-1.0/lib/ccan/check_type/test/000077500000000000000000000000001224151530700170505ustar00rootroot00000000000000ntdb-1.0/lib/ccan/check_type/test/compile_fail-check_type.c000066400000000000000000000002051224151530700237500ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { #ifdef FAIL check_type(argc, char); #endif return 0; } ntdb-1.0/lib/ccan/check_type/test/compile_fail-check_type_unsigned.c000066400000000000000000000003751224151530700256540ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { #ifdef FAIL #if HAVE_TYPEOF check_type(argc, unsigned int); #else /* This doesn't work without typeof, so just fail */ #error "Fail without typeof" #endif #endif return 0; } ntdb-1.0/lib/ccan/check_type/test/compile_fail-check_types_match.c000066400000000000000000000002421224151530700253100ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { unsigned char x = argc; #ifdef FAIL check_types_match(argc, x); #endif return x; } ntdb-1.0/lib/ccan/check_type/test/run.c000066400000000000000000000010621224151530700200170ustar00rootroot00000000000000#include #include int main(int argc, char *argv[]) { int x = 0, y = 0; plan_tests(9); ok1(check_type(argc, int) == 0); ok1(check_type(&argc, int *) == 0); ok1(check_types_match(argc, argc) == 0); ok1(check_types_match(argc, x) == 0); 
ok1(check_types_match(&argc, &x) == 0); ok1(check_type(x++, int) == 0); ok(x == 0, "check_type does not evaluate expression"); ok1(check_types_match(x++, y++) == 0); ok(x == 0 && y == 0, "check_types_match does not evaluate expressions"); return exit_status(); } ntdb-1.0/lib/ccan/compiler/000077500000000000000000000000001224151530700155655ustar00rootroot00000000000000ntdb-1.0/lib/ccan/compiler/LICENSE000066400000000000000000000167251224151530700166050ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. 
The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. 
You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. 
If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. 
If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. ntdb-1.0/lib/ccan/compiler/_info000066400000000000000000000027671224151530700166160ustar00rootroot00000000000000#include #include #include "config.h" /** * compiler - macros for common compiler extensions * * Abstracts away some compiler hints. Currently these include: * - COLD * For functions not called in fast paths (aka. cold functions) * - PRINTF_FMT * For functions which take printf-style parameters. * - CONST_FUNCTION * For functions which return the same value for same parameters. * - NEEDED * For functions and variables which must be emitted even if unused. * - UNNEEDED * For functions and variables which need not be emitted if unused. * - UNUSED * For parameters which are not used. * - IS_COMPILE_CONSTANT() * For using different tradeoffs for compiletime vs runtime evaluation. * * License: Public Domain * Author: Rusty Russell * * Example: * #include * #include * #include * * // Example of a (slow-path) logging function. * static int log_threshold = 2; * static void COLD PRINTF_FMT(2,3) * logger(int level, const char *fmt, ...) 
* { * va_list ap; * va_start(ap, fmt); * if (level >= log_threshold) * vfprintf(stderr, fmt, ap); * va_end(ap); * } * * int main(int argc, char *argv[]) * { * if (argc != 1) { * logger(3, "Don't want %i arguments!\n", argc-1); * return 1; * } * return 0; * } */ int main(int argc, char *argv[]) { /* Expect exactly one argument */ if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { return 0; } return 1; } ntdb-1.0/lib/ccan/compiler/compiler.h000066400000000000000000000126351224151530700175570ustar00rootroot00000000000000#ifndef CCAN_COMPILER_H #define CCAN_COMPILER_H #include "config.h" #ifndef COLD #if HAVE_ATTRIBUTE_COLD /** * COLD - a function is unlikely to be called. * * Used to mark an unlikely code path and optimize appropriately. * It is usually used on logging or error routines. * * Example: * static void COLD moan(const char *reason) * { * fprintf(stderr, "Error: %s (%s)\n", reason, strerror(errno)); * } */ #define COLD __attribute__((cold)) #else #define COLD #endif #endif #ifndef NORETURN #if HAVE_ATTRIBUTE_NORETURN /** * NORETURN - a function does not return * * Used to mark a function which exits; useful for suppressing warnings. * * Example: * static void NORETURN fail(const char *reason) * { * fprintf(stderr, "Error: %s (%s)\n", reason, strerror(errno)); * exit(1); * } */ #define NORETURN __attribute__((noreturn)) #else #define NORETURN #endif #endif #ifndef PRINTF_FMT #if HAVE_ATTRIBUTE_PRINTF /** * PRINTF_FMT - a function takes printf-style arguments * @nfmt: the 1-based number of the function's format argument. * @narg: the 1-based number of the function's first variable argument. * * This allows the compiler to check your parameters as it does for printf(). 
* * Example: * void PRINTF_FMT(2,3) my_printf(const char *prefix, const char *fmt, ...); */ #define PRINTF_FMT(nfmt, narg) \ __attribute__((format(__printf__, nfmt, narg))) #else #define PRINTF_FMT(nfmt, narg) #endif #endif #ifndef CONST_FUNCTION #if HAVE_ATTRIBUTE_CONST /** * CONST_FUNCTION - a function's return depends only on its argument * * This allows the compiler to assume that the function will return the exact * same value for the exact same arguments. This implies that the function * must not use global variables, or dereference pointer arguments. */ #define CONST_FUNCTION __attribute__((const)) #else #define CONST_FUNCTION #endif #endif #if HAVE_ATTRIBUTE_UNUSED #ifndef UNNEEDED /** * UNNEEDED - a variable/function may not be needed * * This suppresses warnings about unused variables or functions, but tells * the compiler that if it is unused it need not emit it into the source code. * * Example: * // With some preprocessor options, this is unnecessary. * static UNNEEDED int counter; * * // With some preprocessor options, this is unnecessary. * static UNNEEDED void add_to_counter(int add) * { * counter += add; * } */ #define UNNEEDED __attribute__((unused)) #endif #ifndef NEEDED #if HAVE_ATTRIBUTE_USED /** * NEEDED - a variable/function is needed * * This suppresses warnings about unused variables or functions, but tells * the compiler that it must exist even if it (seems) unused. * * Example: * // Even if this is unused, these are vital for debugging. * static NEEDED int counter; * static NEEDED void dump_counter(void) * { * printf("Counter is %i\n", counter); * } */ #define NEEDED __attribute__((used)) #else /* Before used, unused functions and vars were always emitted. */ #define NEEDED __attribute__((unused)) #endif #endif #ifndef UNUSED /** * UNUSED - a parameter is unused * * Some compilers (eg. gcc with -W or -Wunused) warn about unused * function parameters. This suppresses such warnings and indicates * to the reader that it's deliberate. 
* * Example: * // This is used as a callback, so needs to have this prototype. * static int some_callback(void *unused UNUSED) * { * return 0; * } */ #define UNUSED __attribute__((unused)) #endif #else #ifndef UNNEEDED #define UNNEEDED #endif #ifndef NEEDED #define NEEDED #endif #ifndef UNUSED #define UNUSED #endif #endif #ifndef IS_COMPILE_CONSTANT #if HAVE_BUILTIN_CONSTANT_P /** * IS_COMPILE_CONSTANT - does the compiler know the value of this expression? * @expr: the expression to evaluate * * When an expression manipulation is complicated, it is usually better to * implement it in a function. However, if the expression being manipulated is * known at compile time, it is better to have the compiler see the entire * expression so it can simply substitute the result. * * This can be done using the IS_COMPILE_CONSTANT() macro. * * Example: * enum greek { ALPHA, BETA, GAMMA, DELTA, EPSILON }; * * // Out-of-line version. * const char *greek_name(enum greek greek); * * // Inline version. * static inline const char *_greek_name(enum greek greek) * { * switch (greek) { * case ALPHA: return "alpha"; * case BETA: return "beta"; * case GAMMA: return "gamma"; * case DELTA: return "delta"; * case EPSILON: return "epsilon"; * default: return "**INVALID**"; * } * } * * // Use inline if compiler knows answer. Otherwise call function * // to avoid copies of the same code everywhere. * #define greek_name(g) \ * (IS_COMPILE_CONSTANT(greek) ? _greek_name(g) : greek_name(g)) */ #define IS_COMPILE_CONSTANT(expr) __builtin_constant_p(expr) #else /* If we don't know, assume it's not. */ #define IS_COMPILE_CONSTANT(expr) 0 #endif #endif #ifndef WARN_UNUSED_RESULT #if HAVE_WARN_UNUSED_RESULT /** * WARN_UNUSED_RESULT - warn if a function return value is unused. * * Used to mark a function where it is extremely unlikely that the caller * can ignore the result, eg realloc(). * * Example: * // buf param may be freed by this; need return value! 
* static char *WARN_UNUSED_RESULT enlarge(char *buf, unsigned *size) * { * return realloc(buf, (*size) *= 2); * } */ #define WARN_UNUSED_RESULT __attribute__((warn_unused_result)) #else #define WARN_UNUSED_RESULT #endif #endif #endif /* CCAN_COMPILER_H */ ntdb-1.0/lib/ccan/compiler/test/000077500000000000000000000000001224151530700165445ustar00rootroot00000000000000ntdb-1.0/lib/ccan/compiler/test/compile_fail-printf.c000066400000000000000000000005511224151530700226340ustar00rootroot00000000000000#include static void PRINTF_FMT(2,3) my_printf(int x, const char *fmt, ...) { } int main(int argc, char *argv[]) { unsigned int i = 0; my_printf(1, "Not a pointer " #ifdef FAIL "%p", #if !HAVE_ATTRIBUTE_PRINTF #error "Unfortunately we don't fail if !HAVE_ATTRIBUTE_PRINTF." #endif #else "%i", #endif i); return 0; } ntdb-1.0/lib/ccan/compiler/test/run-is_compile_constant.c000066400000000000000000000004751224151530700235540ustar00rootroot00000000000000#include #include int main(int argc, char *argv[]) { plan_tests(2); ok1(!IS_COMPILE_CONSTANT(argc)); #if HAVE_BUILTIN_CONSTANT_P ok1(IS_COMPILE_CONSTANT(7)); #else pass("If !HAVE_BUILTIN_CONSTANT_P, IS_COMPILE_CONSTANT always false"); #endif return exit_status(); } ntdb-1.0/lib/ccan/container_of/000077500000000000000000000000001224151530700164215ustar00rootroot00000000000000ntdb-1.0/lib/ccan/container_of/_info000066400000000000000000000024661224151530700174460ustar00rootroot00000000000000#include #include #include "config.h" /** * container_of - routine for upcasting * * It is often convenient to create code where the caller registers a pointer * to a generic structure and a callback. The callback might know that the * pointer points to within a larger structure, and container_of gives a * convenient and fairly type-safe way of returning to the enclosing structure. * * This idiom is an alternative to providing a void * pointer for every * callback. 
* * Example: * #include * #include * * struct timer { * void *members; * }; * * struct info { * int my_stuff; * struct timer timer; * }; * * static void register_timer(struct timer *timer) * { * //... * } * * static void my_timer_callback(struct timer *timer) * { * struct info *info = container_of(timer, struct info, timer); * printf("my_stuff is %u\n", info->my_stuff); * } * * int main(void) * { * struct info info = { .my_stuff = 1 }; * * register_timer(&info.timer); * // ... * return 0; * } * * License: Public domain * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/check_type\n"); return 0; } return 1; } ntdb-1.0/lib/ccan/container_of/container_of.h000066400000000000000000000060271224151530700212450ustar00rootroot00000000000000#ifndef CCAN_CONTAINER_OF_H #define CCAN_CONTAINER_OF_H #include #include "config.h" #include /** * container_of - get pointer to enclosing structure * @member_ptr: pointer to the structure member * @containing_type: the type this member is within * @member: the name of this member within the structure. * * Given a pointer to a member of a structure, this macro does pointer * subtraction to return the pointer to the enclosing type. * * Example: * struct foo { * int fielda, fieldb; * // ... * }; * struct info { * int some_other_field; * struct foo my_foo; * }; * * static struct info *foo_to_info(struct foo *foo) * { * return container_of(foo, struct info, my_foo); * } */ #define container_of(member_ptr, containing_type, member) \ ((containing_type *) \ ((char *)(member_ptr) \ - container_off(containing_type, member)) \ + check_types_match(*(member_ptr), ((containing_type *)0)->member)) /** * container_off - get offset to enclosing structure * @containing_type: the type this member is within * @member: the name of this member within the structure. 
* * Given a pointer to a member of a structure, this macro does * typechecking and figures out the offset to the enclosing type. * * Example: * struct foo { * int fielda, fieldb; * // ... * }; * struct info { * int some_other_field; * struct foo my_foo; * }; * * static struct info *foo_to_info(struct foo *foo) * { * size_t off = container_off(struct info, my_foo); * return (void *)((char *)foo - off); * } */ #define container_off(containing_type, member) \ offsetof(containing_type, member) /** * container_of_var - get pointer to enclosing structure using a variable * @member_ptr: pointer to the structure member * @container_var: a pointer of same type as this member's container * @member: the name of this member within the structure. * * Given a pointer to a member of a structure, this macro does pointer * subtraction to return the pointer to the enclosing type. * * Example: * static struct info *foo_to_i(struct foo *foo) * { * struct info *i = container_of_var(foo, i, my_foo); * return i; * } */ #if HAVE_TYPEOF #define container_of_var(member_ptr, container_var, member) \ container_of(member_ptr, typeof(*container_var), member) #else #define container_of_var(member_ptr, container_var, member) \ ((void *)((char *)(member_ptr) - \ container_off_var(container_var, member))) #endif /** * container_off_var - get offset of a field in enclosing structure * @container_var: a pointer to a container structure * @member: the name of a member within the structure. * * Given (any) pointer to a structure and a its member name, this * macro does pointer subtraction to return offset of member in a * structure memory layout. 
* */ #if HAVE_TYPEOF #define container_off_var(var, member) \ container_off(typeof(*var), member) #else #define container_off_var(var, member) \ ((char *)&(var)->member - (char *)(var)) #endif #endif /* CCAN_CONTAINER_OF_H */ ntdb-1.0/lib/ccan/container_of/test/000077500000000000000000000000001224151530700174005ustar00rootroot00000000000000ntdb-1.0/lib/ccan/container_of/test/compile_fail-bad-type.c000066400000000000000000000005511224151530700236730ustar00rootroot00000000000000#include #include struct foo { int a; char b; }; int main(int argc, char *argv[]) { struct foo foo = { .a = 1, .b = 2 }; int *intp = &foo.a; char *p; #ifdef FAIL /* p is a char *, but this gives a struct foo * */ p = container_of(intp, struct foo, a); #else p = (char *)intp; #endif return p == NULL; } ntdb-1.0/lib/ccan/container_of/test/compile_fail-types.c000066400000000000000000000006321224151530700233320ustar00rootroot00000000000000#include #include struct foo { int a; char b; }; int main(int argc, char *argv[]) { struct foo foo = { .a = 1, .b = 2 }, *foop; int *intp = &foo.a; #ifdef FAIL /* b is a char, but intp is an int * */ foop = container_of(intp, struct foo, b); #else foop = NULL; #endif (void) foop; /* Suppress unused-but-set-variable warning. */ return intp == NULL; } ntdb-1.0/lib/ccan/container_of/test/compile_fail-var-types.c000066400000000000000000000007561224151530700241270ustar00rootroot00000000000000#include #include struct foo { int a; char b; }; int main(int argc, char *argv[]) { struct foo foo = { .a = 1, .b = 2 }, *foop; int *intp = &foo.a; #ifdef FAIL /* b is a char, but intp is an int * */ foop = container_of_var(intp, foop, b); #if !HAVE_TYPEOF #error "Unfortunately we don't fail if we don't have typeof." #endif #else foop = NULL; #endif (void) foop; /* Suppress unused-but-set-variable warning. 
*/ return intp == NULL; } ntdb-1.0/lib/ccan/container_of/test/run.c000066400000000000000000000012271224151530700203520ustar00rootroot00000000000000#include #include struct foo { int a; char b; }; int main(int argc, char *argv[]) { struct foo foo = { .a = 1, .b = 2 }; int *intp = &foo.a; char *charp = &foo.b; plan_tests(8); ok1(container_of(intp, struct foo, a) == &foo); ok1(container_of(charp, struct foo, b) == &foo); ok1(container_of_var(intp, &foo, a) == &foo); ok1(container_of_var(charp, &foo, b) == &foo); ok1(container_off(struct foo, a) == 0); ok1(container_off(struct foo, b) == offsetof(struct foo, b)); ok1(container_off_var(&foo, a) == 0); ok1(container_off_var(&foo, b) == offsetof(struct foo, b)); return exit_status(); } ntdb-1.0/lib/ccan/endian/000077500000000000000000000000001224151530700152115ustar00rootroot00000000000000ntdb-1.0/lib/ccan/endian/LICENSE000066400000000000000000000636351224151530700162330ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. 
You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. 
Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. 
In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. 
A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. 
b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. 
In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". 
Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. 
You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. 
e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. 
However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. 
If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. 
If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! 
ntdb-1.0/lib/ccan/endian/_info000066400000000000000000000024501224151530700162270ustar00rootroot00000000000000#include #include #include "config.h" /** * endian - endian conversion macros for simple types * * Portable protocols (such as on-disk formats, or network protocols) * are often defined to be a particular endian: little-endian (least * significant bytes first) or big-endian (most significant bytes * first). * * Similarly, some CPUs lay out values in memory in little-endian * order (most commonly, Intel's 8086 and derivatives), or big-endian * order (almost everyone else). * * This module provides conversion routines, inspired by the linux kernel. * * Example: * #include * #include * #include * * // * int main(int argc, char *argv[]) * { * uint32_t value; * * if (argc != 2) * errx(1, "Usage: %s ", argv[0]); * * value = atoi(argv[1]); * printf("native: %08x\n", value); * printf("little-endian: %08x\n", cpu_to_le32(value)); * printf("big-endian: %08x\n", cpu_to_be32(value)); * printf("byte-reversed: %08x\n", bswap_32(value)); * exit(0); * } * * License: LGPL (v2.1 or any later version) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) /* Nothing */ return 0; return 1; } ntdb-1.0/lib/ccan/endian/endian.h000066400000000000000000000114471224151530700166270ustar00rootroot00000000000000/* Licensed under LGPLv2.1+ - see LICENSE file for details */ #ifndef CCAN_ENDIAN_H #define CCAN_ENDIAN_H #include #include "config.h" #if HAVE_BYTESWAP_H #include #else /** * bswap_16 - reverse bytes in a uint16_t value. * @val: value whose bytes to swap. * * Example: * // Output contains "1024 is 4 as two bytes reversed" * printf("1024 is %u as two bytes reversed\n", bswap_16(1024)); */ static inline uint16_t bswap_16(uint16_t val) { return ((val & (uint16_t)0x00ffU) << 8) | ((val & (uint16_t)0xff00U) >> 8); } /** * bswap_32 - reverse bytes in a uint32_t value. * @val: value whose bytes to swap. 
* * Example: * // Output contains "1024 is 262144 as four bytes reversed" * printf("1024 is %u as four bytes reversed\n", bswap_32(1024)); */ static inline uint32_t bswap_32(uint32_t val) { return ((val & (uint32_t)0x000000ffUL) << 24) | ((val & (uint32_t)0x0000ff00UL) << 8) | ((val & (uint32_t)0x00ff0000UL) >> 8) | ((val & (uint32_t)0xff000000UL) >> 24); } #endif /* !HAVE_BYTESWAP_H */ #if !HAVE_BSWAP_64 /** * bswap_64 - reverse bytes in a uint64_t value. * @val: value whose bytes to swap. * * Example: * // Output contains "1024 is 1125899906842624 as eight bytes reversed" * printf("1024 is %llu as eight bytes reversed\n", * (unsigned long long)bswap_64(1024)); */ static inline uint64_t bswap_64(uint64_t val) { return ((val & (uint64_t)0x00000000000000ffULL) << 56) | ((val & (uint64_t)0x000000000000ff00ULL) << 40) | ((val & (uint64_t)0x0000000000ff0000ULL) << 24) | ((val & (uint64_t)0x00000000ff000000ULL) << 8) | ((val & (uint64_t)0x000000ff00000000ULL) >> 8) | ((val & (uint64_t)0x0000ff0000000000ULL) >> 24) | ((val & (uint64_t)0x00ff000000000000ULL) >> 40) | ((val & (uint64_t)0xff00000000000000ULL) >> 56); } #endif /* Sanity check the defines. We don't handle weird endianness. */ #if !HAVE_LITTLE_ENDIAN && !HAVE_BIG_ENDIAN #error "Unknown endian" #elif HAVE_LITTLE_ENDIAN && HAVE_BIG_ENDIAN #error "Can't compile for both big and little endian." 
#endif /** * cpu_to_le64 - convert a uint64_t value to little-endian * @native: value to convert */ static inline uint64_t cpu_to_le64(uint64_t native) { #if HAVE_LITTLE_ENDIAN return native; #else return bswap_64(native); #endif } /** * cpu_to_le32 - convert a uint32_t value to little-endian * @native: value to convert */ static inline uint32_t cpu_to_le32(uint32_t native) { #if HAVE_LITTLE_ENDIAN return native; #else return bswap_32(native); #endif } /** * cpu_to_le16 - convert a uint16_t value to little-endian * @native: value to convert */ static inline uint16_t cpu_to_le16(uint16_t native) { #if HAVE_LITTLE_ENDIAN return native; #else return bswap_16(native); #endif } /** * le64_to_cpu - convert a little-endian uint64_t value * @le_val: little-endian value to convert */ static inline uint64_t le64_to_cpu(uint64_t le_val) { #if HAVE_LITTLE_ENDIAN return le_val; #else return bswap_64(le_val); #endif } /** * le32_to_cpu - convert a little-endian uint32_t value * @le_val: little-endian value to convert */ static inline uint32_t le32_to_cpu(uint32_t le_val) { #if HAVE_LITTLE_ENDIAN return le_val; #else return bswap_32(le_val); #endif } /** * le16_to_cpu - convert a little-endian uint16_t value * @le_val: little-endian value to convert */ static inline uint16_t le16_to_cpu(uint16_t le_val) { #if HAVE_LITTLE_ENDIAN return le_val; #else return bswap_16(le_val); #endif } /** * cpu_to_be64 - convert a uint64_t value to big endian. * @native: value to convert */ static inline uint64_t cpu_to_be64(uint64_t native) { #if HAVE_LITTLE_ENDIAN return bswap_64(native); #else return native; #endif } /** * cpu_to_be32 - convert a uint32_t value to big endian. * @native: value to convert */ static inline uint32_t cpu_to_be32(uint32_t native) { #if HAVE_LITTLE_ENDIAN return bswap_32(native); #else return native; #endif } /** * cpu_to_be16 - convert a uint16_t value to big endian. 
* @native: value to convert */ static inline uint16_t cpu_to_be16(uint16_t native) { #if HAVE_LITTLE_ENDIAN return bswap_16(native); #else return native; #endif } /** * be64_to_cpu - convert a big-endian uint64_t value * @be_val: big-endian value to convert */ static inline uint64_t be64_to_cpu(uint64_t be_val) { #if HAVE_LITTLE_ENDIAN return bswap_64(be_val); #else return be_val; #endif } /** * be32_to_cpu - convert a big-endian uint32_t value * @be_val: big-endian value to convert */ static inline uint32_t be32_to_cpu(uint32_t be_val) { #if HAVE_LITTLE_ENDIAN return bswap_32(be_val); #else return be_val; #endif } /** * be16_to_cpu - convert a big-endian uint16_t value * @be_val: big-endian value to convert */ static inline uint16_t be16_to_cpu(uint16_t be_val) { #if HAVE_LITTLE_ENDIAN return bswap_16(be_val); #else return be_val; #endif } #endif /* CCAN_ENDIAN_H */ ntdb-1.0/lib/ccan/endian/test/000077500000000000000000000000001224151530700161705ustar00rootroot00000000000000ntdb-1.0/lib/ccan/endian/test/run.c000066400000000000000000000052361224151530700171460ustar00rootroot00000000000000#include #include #include #include int main(int argc, char *argv[]) { union { uint64_t u64; unsigned char u64_bytes[8]; } u64; union { uint32_t u32; unsigned char u32_bytes[4]; } u32; union { uint16_t u16; unsigned char u16_bytes[2]; } u16; plan_tests(48); /* Straight swap tests. 
*/ u64.u64_bytes[0] = 0x00; u64.u64_bytes[1] = 0x11; u64.u64_bytes[2] = 0x22; u64.u64_bytes[3] = 0x33; u64.u64_bytes[4] = 0x44; u64.u64_bytes[5] = 0x55; u64.u64_bytes[6] = 0x66; u64.u64_bytes[7] = 0x77; u64.u64 = bswap_64(u64.u64); ok1(u64.u64_bytes[7] == 0x00); ok1(u64.u64_bytes[6] == 0x11); ok1(u64.u64_bytes[5] == 0x22); ok1(u64.u64_bytes[4] == 0x33); ok1(u64.u64_bytes[3] == 0x44); ok1(u64.u64_bytes[2] == 0x55); ok1(u64.u64_bytes[1] == 0x66); ok1(u64.u64_bytes[0] == 0x77); u32.u32_bytes[0] = 0x00; u32.u32_bytes[1] = 0x11; u32.u32_bytes[2] = 0x22; u32.u32_bytes[3] = 0x33; u32.u32 = bswap_32(u32.u32); ok1(u32.u32_bytes[3] == 0x00); ok1(u32.u32_bytes[2] == 0x11); ok1(u32.u32_bytes[1] == 0x22); ok1(u32.u32_bytes[0] == 0x33); u16.u16_bytes[0] = 0x00; u16.u16_bytes[1] = 0x11; u16.u16 = bswap_16(u16.u16); ok1(u16.u16_bytes[1] == 0x00); ok1(u16.u16_bytes[0] == 0x11); /* Endian tests. */ u64.u64 = cpu_to_le64(0x0011223344556677ULL); ok1(u64.u64_bytes[0] == 0x77); ok1(u64.u64_bytes[1] == 0x66); ok1(u64.u64_bytes[2] == 0x55); ok1(u64.u64_bytes[3] == 0x44); ok1(u64.u64_bytes[4] == 0x33); ok1(u64.u64_bytes[5] == 0x22); ok1(u64.u64_bytes[6] == 0x11); ok1(u64.u64_bytes[7] == 0x00); ok1(le64_to_cpu(u64.u64) == 0x0011223344556677ULL); u64.u64 = cpu_to_be64(0x0011223344556677ULL); ok1(u64.u64_bytes[7] == 0x77); ok1(u64.u64_bytes[6] == 0x66); ok1(u64.u64_bytes[5] == 0x55); ok1(u64.u64_bytes[4] == 0x44); ok1(u64.u64_bytes[3] == 0x33); ok1(u64.u64_bytes[2] == 0x22); ok1(u64.u64_bytes[1] == 0x11); ok1(u64.u64_bytes[0] == 0x00); ok1(be64_to_cpu(u64.u64) == 0x0011223344556677ULL); u32.u32 = cpu_to_le32(0x00112233); ok1(u32.u32_bytes[0] == 0x33); ok1(u32.u32_bytes[1] == 0x22); ok1(u32.u32_bytes[2] == 0x11); ok1(u32.u32_bytes[3] == 0x00); ok1(le32_to_cpu(u32.u32) == 0x00112233); u32.u32 = cpu_to_be32(0x00112233); ok1(u32.u32_bytes[3] == 0x33); ok1(u32.u32_bytes[2] == 0x22); ok1(u32.u32_bytes[1] == 0x11); ok1(u32.u32_bytes[0] == 0x00); ok1(be32_to_cpu(u32.u32) == 0x00112233); u16.u16 = 
cpu_to_le16(0x0011); ok1(u16.u16_bytes[0] == 0x11); ok1(u16.u16_bytes[1] == 0x00); ok1(le16_to_cpu(u16.u16) == 0x0011); u16.u16 = cpu_to_be16(0x0011); ok1(u16.u16_bytes[1] == 0x11); ok1(u16.u16_bytes[0] == 0x00); ok1(be16_to_cpu(u16.u16) == 0x0011); exit(exit_status()); } ntdb-1.0/lib/ccan/err/000077500000000000000000000000001224151530700145435ustar00rootroot00000000000000ntdb-1.0/lib/ccan/err/.depends000066400000000000000000000000161224151530700161630ustar00rootroot00000000000000ccan/compiler ntdb-1.0/lib/ccan/err/_info000066400000000000000000000015371224151530700155660ustar00rootroot00000000000000#include #include #include "config.h" /** * err - err(), errx(), warn() and warnx(), as per BSD's err.h. * * A few platforms don't provide err.h; for those, this provides replacements. * For most, it simple includes the system err.h. * * Unfortunately, you have to call err_set_progname() to tell the replacements * your program name, otherwise it prints "unknown program". * * Example: * #include * * int main(int argc, char *argv[]) * { * err_set_progname(argv[0]); * if (argc != 1) * errx(1, "Expect no arguments"); * exit(0); * } * * License: Public domain * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { #if !HAVE_ERR_H printf("ccan/compiler\n"); #endif return 0; } return 1; } ntdb-1.0/lib/ccan/err/err.c000066400000000000000000000021161224151530700154770ustar00rootroot00000000000000#include "err.h" #if !HAVE_ERR_H #include #include #include #include #include static const char *progname = "unknown program"; void err_set_progname(const char *name) { progname = name; } void NORETURN err(int eval, const char *fmt, ...) { int err_errno = errno; va_list ap; fprintf(stderr, "%s: ", progname); va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); fprintf(stderr, ": %s\n", strerror(err_errno)); exit(eval); } void NORETURN errx(int eval, const char *fmt, ...) 
{ va_list ap; fprintf(stderr, "%s: ", progname); va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); fprintf(stderr, "\n"); exit(eval); } void warn(const char *fmt, ...) { int err_errno = errno; va_list ap; fprintf(stderr, "%s: ", progname); va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); fprintf(stderr, ": %s\n", strerror(err_errno)); } void warnx(const char *fmt, ...) { va_list ap; fprintf(stderr, "%s: ", progname); va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); fprintf(stderr, "\n"); } #endif ntdb-1.0/lib/ccan/err/err.h000066400000000000000000000040761224151530700155130ustar00rootroot00000000000000#ifndef CCAN_ERR_H #define CCAN_ERR_H #include "config.h" #if HAVE_ERR_H #include /* This is unnecessary with a real err.h. See below */ #define err_set_progname(name) ((void)name) #else #include /** * err_set_progname - set the program name * @name: the name to use for err, errx, warn and warnx * * The BSD err.h calls know the program name, unfortunately there's no * portable way for the CCAN replacements to do that on other systems. * * If you don't call this with argv[0], it will be "unknown program". * * Example: * err_set_progname(argv[0]); */ void err_set_progname(const char *name); /** * err - exit(eval) with message based on format and errno. * @eval: the exit code * @fmt: the printf-style format string * * The format string is printed to stderr like so: * : : \n * * Example: * char *p = strdup("hello"); * if (!p) * err(1, "Failed to strdup 'hello'"); */ void NORETURN err(int eval, const char *fmt, ...); /** * errx - exit(eval) with message based on format. * @eval: the exit code * @fmt: the printf-style format string * * The format string is printed to stderr like so: * : \n * * Example: * if (argc != 1) * errx(1, "I don't expect any arguments"); */ void NORETURN errx(int eval, const char *fmt, ...); /** * warn - print a message to stderr based on format and errno. 
* @eval: the exit code * @fmt: the printf-style format string * * The format string is printed to stderr like so: * : : \n * * Example: * char *p = strdup("hello"); * if (!p) * warn("Failed to strdup 'hello'"); */ void warn(const char *fmt, ...); /** * warnx - print a message to stderr based on format. * @eval: the exit code * @fmt: the printf-style format string * * The format string is printed to stderr like so: * : \n * * Example: * if (argc != 1) * warnx("I don't expect any arguments (ignoring)"); */ void warnx(const char *fmt, ...); #endif #endif /* CCAN_ERR_H */ ntdb-1.0/lib/ccan/err/test/000077500000000000000000000000001224151530700155225ustar00rootroot00000000000000ntdb-1.0/lib/ccan/err/test/run.c000066400000000000000000000056701224151530700165020ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #define BUFFER_MAX 1024 int main(int argc, char *argv[]) { int pfd[2]; const char *base; plan_tests(24); err_set_progname(argv[0]); /* In case it only prints out the basename of argv[0]. */ base = strrchr(argv[0], '/'); if (base) base++; else base = argv[0]; /* Test err() in child */ pipe(pfd); fflush(stdout); if (fork()) { char buffer[BUFFER_MAX+1]; unsigned int i; int status; /* We are parent. */ close(pfd[1]); for (i = 0; i < BUFFER_MAX; i++) { if (read(pfd[0], buffer + i, 1) == 0) { buffer[i] = '\0'; ok1(strstr(buffer, "running err:")); ok1(strstr(buffer, strerror(ENOENT))); ok1(strstr(buffer, base)); ok1(buffer[i-1] == '\n'); break; } } close(pfd[0]); ok1(wait(&status) != -1); ok1(WIFEXITED(status)); ok1(WEXITSTATUS(status) == 17); } else { close(pfd[0]); dup2(pfd[1], STDERR_FILENO); errno = ENOENT; err(17, "running %s", "err"); abort(); } /* Test errx() in child */ pipe(pfd); fflush(stdout); if (fork()) { char buffer[BUFFER_MAX+1]; unsigned int i; int status; /* We are parent. 
*/ close(pfd[1]); for (i = 0; i < BUFFER_MAX; i++) { if (read(pfd[0], buffer + i, 1) == 0) { buffer[i] = '\0'; ok1(strstr(buffer, "running errx\n")); ok1(strstr(buffer, base)); break; } } close(pfd[0]); ok1(wait(&status) != -1); ok1(WIFEXITED(status)); ok1(WEXITSTATUS(status) == 17); } else { close(pfd[0]); dup2(pfd[1], STDERR_FILENO); errx(17, "running %s", "errx"); abort(); } /* Test warn() in child */ pipe(pfd); fflush(stdout); if (fork()) { char buffer[BUFFER_MAX+1]; unsigned int i; int status; /* We are parent. */ close(pfd[1]); for (i = 0; i < BUFFER_MAX; i++) { if (read(pfd[0], buffer + i, 1) == 0) { buffer[i] = '\0'; ok1(strstr(buffer, "running warn:")); ok1(strstr(buffer, strerror(ENOENT))); ok1(strstr(buffer, base)); ok1(buffer[i-1] == '\n'); break; } } close(pfd[0]); ok1(wait(&status) != -1); ok1(WIFEXITED(status)); ok1(WEXITSTATUS(status) == 17); } else { close(pfd[0]); dup2(pfd[1], STDERR_FILENO); errno = ENOENT; warn("running %s", "warn"); exit(17); } /* Test warnx() in child */ pipe(pfd); fflush(stdout); if (fork()) { char buffer[BUFFER_MAX+1]; unsigned int i; int status; /* We are parent. */ close(pfd[1]); for (i = 0; i < BUFFER_MAX; i++) { if (read(pfd[0], buffer + i, 1) == 0) { buffer[i] = '\0'; ok1(strstr(buffer, "running warnx\n")); ok1(strstr(buffer, base)); break; } } close(pfd[0]); ok1(wait(&status) != -1); ok1(WIFEXITED(status)); ok1(WEXITSTATUS(status) == 17); } else { close(pfd[0]); dup2(pfd[1], STDERR_FILENO); warnx("running %s", "warnx"); exit(17); } return exit_status(); } ntdb-1.0/lib/ccan/failtest/000077500000000000000000000000001224151530700155665ustar00rootroot00000000000000ntdb-1.0/lib/ccan/failtest/LICENSE000066400000000000000000000167251224151530700166060ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 
This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. 
If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. 
c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. 
You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. 
ntdb-1.0/lib/ccan/failtest/_info000066400000000000000000000040041224151530700166010ustar00rootroot00000000000000#include #include #include "config.h" /** * failtest - unit test helpers for testing malloc and other failures. * * The failtest module overrides various standard functions, and forks * your unit test at those points to test failure paths. The failing * child are expected to fail (eg. when malloc fails), but should not * leak memory or crash. After including failtest_override.h, you can * include failtest_restore.h to return to non-failing versions. * * The unit test is a normal CCAN tap-style test, except it should * start by calling failtest_init() and end by calling * failtest_exit(). * * You can control what functions fail: see failtest_hook. * * Example: * #include * #include * #include * #include * #include * #include * * int main(int argc, char *argv[]) * { * char *a, *b; * * failtest_init(argc, argv); * plan_tests(3); * * // Simple malloc test. * a = malloc(100); * if (ok1(a)) { * // Fill the memory. * memset(a, 'x', 100); * b = realloc(a, 200); * if (ok1(b)) { * // Fill the rest of the memory. * memset(b + 100, 'y', 100); * // Check it got a copy of a as expected. * ok1(strspn(b, "x") == 100); * free(b); * } else { * // Easy to miss: free a on realloc failure! * free(a); * } * } * failtest_exit(exit_status()); * } * * License: LGPL * Author: Rusty Russell * Ccanlint: * // valgrind seems to mess up rlimit. 
* tests_pass_valgrind test/run-with-fdlimit.c:FAIL */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/build_assert\n"); printf("ccan/compiler\n"); printf("ccan/err\n"); printf("ccan/hash\n"); printf("ccan/htable\n"); printf("ccan/list\n"); printf("ccan/read_write_all\n"); printf("ccan/str\n"); printf("ccan/time\n"); printf("ccan/tlist\n"); return 0; } return 1; } ntdb-1.0/lib/ccan/failtest/failtest.c000066400000000000000000001246731224151530700175620ustar00rootroot00000000000000/* Licensed under LGPL - see LICENSE file for details */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include enum failtest_result (*failtest_hook)(struct tlist_calls *); static FILE *tracef = NULL, *warnf; static int traceindent = 0; unsigned int failtest_timeout_ms = 20000; const char *failpath; const char *debugpath; enum info_type { WRITE, RELEASE_LOCKS, FAILURE, SUCCESS, UNEXPECTED }; struct lock_info { int fd; /* end is inclusive: you can't have a 0-byte lock. */ off_t start, end; int type; }; /* We hash the call location together with its backtrace. */ static size_t hash_call(const struct failtest_call *call) { return hash(call->file, strlen(call->file), hash(&call->line, 1, hash(call->backtrace, call->backtrace_num, call->type))); } static bool call_eq(const struct failtest_call *call1, const struct failtest_call *call2) { unsigned int i; if (strcmp(call1->file, call2->file) != 0 || call1->line != call2->line || call1->type != call2->type || call1->backtrace_num != call2->backtrace_num) return false; for (i = 0; i < call1->backtrace_num; i++) if (call1->backtrace[i] != call2->backtrace[i]) return false; return true; } /* Defines struct failtable. 
*/ HTABLE_DEFINE_TYPE(struct failtest_call, (struct failtest_call *), hash_call, call_eq, failtable); bool (*failtest_exit_check)(struct tlist_calls *history); /* The entire history of all calls. */ static struct tlist_calls history = TLIST_INIT(history); /* If we're a child, the fd two write control info to the parent. */ static int control_fd = -1; /* If we're a child, this is the first call we did ourselves. */ static struct failtest_call *our_history_start = NULL; /* For printing runtime with --trace. */ static struct timeval start; /* Set when failtest_hook returns FAIL_PROBE */ static bool probing = false; /* Table to track duplicates. */ static struct failtable failtable; /* Array of writes which our child did. We report them on failure. */ static struct write_call *child_writes = NULL; static unsigned int child_writes_num = 0; /* fcntl locking info. */ static pid_t lock_owner; static struct lock_info *locks = NULL; static unsigned int lock_num = 0; /* Our original pid, which we return to anyone who asks. */ static pid_t orig_pid; /* Mapping from failtest_type to char. */ static const char info_to_arg[] = "mceoxprwfal"; /* Dummy call used for failtest_undo wrappers. */ static struct failtest_call unrecorded_call; struct contents_saved { size_t count; off_t off; off_t old_len; char contents[1]; }; /* File contents, saved in this child only. */ struct saved_mmapped_file { struct saved_mmapped_file *next; struct failtest_call *opener; struct contents_saved *s; }; static struct saved_mmapped_file *saved_mmapped_files; #if HAVE_BACKTRACE #include static void **get_backtrace(unsigned int *num) { static unsigned int max_back = 100; void **ret; again: ret = malloc(max_back * sizeof(void *)); *num = backtrace(ret, max_back); if (*num == max_back) { free(ret); max_back *= 2; goto again; } return ret; } #else /* This will test slightly less, since will consider all of the same * calls as identical. But, it's slightly faster! 
*/ static void **get_backtrace(unsigned int *num) { *num = 0; return NULL; } #endif /* HAVE_BACKTRACE */ static struct failtest_call *add_history_(enum failtest_call_type type, bool can_leak, const char *file, unsigned int line, const void *elem, size_t elem_size) { struct failtest_call *call; /* NULL file is how we suppress failure. */ if (!file) return &unrecorded_call; call = malloc(sizeof *call); call->type = type; call->can_leak = can_leak; call->file = file; call->line = line; call->cleanup = NULL; call->backtrace = get_backtrace(&call->backtrace_num); memcpy(&call->u, elem, elem_size); tlist_add_tail(&history, call, list); return call; } #define add_history(type, can_leak, file, line, elem) \ add_history_((type), (can_leak), (file), (line), (elem), sizeof(*(elem))) /* We do a fake call inside a sizeof(), to check types. */ #define set_cleanup(call, clean, type) \ (call)->cleanup = (void *)((void)sizeof(clean((type *)NULL, false),1), (clean)) /* Dup the fd to a high value (out of the way I hope!), and close the old fd. */ static int move_fd_to_high(int fd) { int i; struct rlimit lim; int max; if (getrlimit(RLIMIT_NOFILE, &lim) == 0) { max = lim.rlim_cur; } else max = FD_SETSIZE; for (i = max - 1; i > fd; i--) { if (fcntl(i, F_GETFL) == -1 && errno == EBADF) { if (dup2(fd, i) == -1) { warn("Failed to dup fd %i to %i", fd, i); continue; } close(fd); return i; } } /* Nothing? Really? Er... ok? */ return fd; } static bool read_write_info(int fd) { struct write_call *w; char *buf; /* We don't need all of this, but it's simple. */ child_writes = realloc(child_writes, (child_writes_num+1) * sizeof(child_writes[0])); w = &child_writes[child_writes_num]; if (!read_all(fd, w, sizeof(*w))) return false; w->buf = buf = malloc(w->count); if (!read_all(fd, buf, w->count)) return false; child_writes_num++; return true; } static char *failpath_string(void) { struct failtest_call *i; char *ret = strdup(""); unsigned len = 0; /* Inefficient, but who cares? 
*/ tlist_for_each(&history, i, list) { ret = realloc(ret, len + 2); ret[len] = info_to_arg[i->type]; if (i->fail) ret[len] = toupper(ret[len]); ret[++len] = '\0'; } return ret; } static void do_warn(int e, const char *fmt, va_list ap) { char *p = failpath_string(); vfprintf(warnf, fmt, ap); if (e != -1) fprintf(warnf, ": %s", strerror(e)); fprintf(warnf, " [%s]\n", p); free(p); } static void fwarn(const char *fmt, ...) { va_list ap; int e = errno; va_start(ap, fmt); do_warn(e, fmt, ap); va_end(ap); } static void fwarnx(const char *fmt, ...) { va_list ap; va_start(ap, fmt); do_warn(-1, fmt, ap); va_end(ap); } static void tell_parent(enum info_type type) { if (control_fd != -1) write_all(control_fd, &type, sizeof(type)); } static void child_fail(const char *out, size_t outlen, const char *fmt, ...) { va_list ap; char *path = failpath_string(); va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); fprintf(stderr, "%.*s", (int)outlen, out); printf("To reproduce: --failpath=%s\n", path); free(path); tell_parent(FAILURE); exit(1); } static void PRINTF_FMT(1, 2) trace(const char *fmt, ...) { va_list ap; unsigned int i; char *p; static int idx; if (!tracef) return; for (i = 0; i < traceindent; i++) fprintf(tracef, " "); p = failpath_string(); fprintf(tracef, "%i: %u: %s ", idx++, (unsigned int)getpid(), p); va_start(ap, fmt); vfprintf(tracef, fmt, ap); va_end(ap); free(p); } static pid_t child; static void hand_down(int signum) { kill(child, signum); } static void release_locks(void) { /* Locks were never acquired/reacquired? */ if (lock_owner == 0) return; /* We own them? Release them all. */ if (lock_owner == getpid()) { unsigned int i; struct flock fl; fl.l_type = F_UNLCK; fl.l_whence = SEEK_SET; fl.l_start = 0; fl.l_len = 0; trace("Releasing %u locks\n", lock_num); for (i = 0; i < lock_num; i++) fcntl(locks[i].fd, F_SETLK, &fl); } else { /* Our parent must have them; pass request up. 
*/ enum info_type type = RELEASE_LOCKS; assert(control_fd != -1); write_all(control_fd, &type, sizeof(type)); } lock_owner = 0; } /* off_t is a signed type. Getting its max is non-trivial. */ static off_t off_max(void) { BUILD_ASSERT(sizeof(off_t) == 4 || sizeof(off_t) == 8); if (sizeof(off_t) == 4) return (off_t)0x7FFFFFF; else return (off_t)0x7FFFFFFFFFFFFFFULL; } static void get_locks(void) { unsigned int i; struct flock fl; if (lock_owner == getpid()) return; if (lock_owner != 0) { enum info_type type = RELEASE_LOCKS; assert(control_fd != -1); trace("Asking parent to release locks\n"); write_all(control_fd, &type, sizeof(type)); } fl.l_whence = SEEK_SET; for (i = 0; i < lock_num; i++) { fl.l_type = locks[i].type; fl.l_start = locks[i].start; if (locks[i].end == off_max()) fl.l_len = 0; else fl.l_len = locks[i].end - locks[i].start + 1; if (fcntl(locks[i].fd, F_SETLKW, &fl) != 0) abort(); } trace("Acquired %u locks\n", lock_num); lock_owner = getpid(); } static struct contents_saved *save_contents(const char *filename, int fd, size_t count, off_t off, const char *why) { struct contents_saved *s = malloc(sizeof(*s) + count); ssize_t ret; s->off = off; ret = pread(fd, s->contents, count, off); if (ret < 0) { fwarn("failtest_write: failed to save old contents!"); s->count = 0; } else s->count = ret; /* Use lseek to get the size of file, but we have to restore * file offset */ off = lseek(fd, 0, SEEK_CUR); s->old_len = lseek(fd, 0, SEEK_END); lseek(fd, off, SEEK_SET); trace("Saving %p %s %zu@%llu after %s (filelength %llu) via fd %i\n", s, filename, s->count, (long long)s->off, why, (long long)s->old_len, fd); return s; } static void restore_contents(struct failtest_call *opener, struct contents_saved *s, bool restore_offset, const char *caller) { int fd; /* The top parent doesn't need to restore. */ if (control_fd == -1) return; /* Has the fd been closed? */ if (opener->u.open.closed) { /* Reopen, replace fd, close silently as we clean up. 
*/ fd = open(opener->u.open.pathname, O_RDWR); if (fd < 0) { fwarn("failtest: could not reopen %s to clean up %s!", opener->u.open.pathname, caller); return; } /* Make it clearly distinguisable from a "normal" fd. */ fd = move_fd_to_high(fd); trace("Reopening %s to restore it (was fd %i, now %i)\n", opener->u.open.pathname, opener->u.open.ret, fd); opener->u.open.ret = fd; opener->u.open.closed = false; } fd = opener->u.open.ret; trace("Restoring %p %s %zu@%llu after %s (filelength %llu) via fd %i\n", s, opener->u.open.pathname, s->count, (long long)s->off, caller, (long long)s->old_len, fd); if (pwrite(fd, s->contents, s->count, s->off) != s->count) { fwarn("failtest: write failed cleaning up %s for %s!", opener->u.open.pathname, caller); } if (ftruncate(fd, s->old_len) != 0) { fwarn("failtest_write: truncate failed cleaning up %s for %s!", opener->u.open.pathname, caller); } if (restore_offset) { trace("Restoring offset of fd %i to %llu\n", fd, (long long)s->off); lseek(fd, s->off, SEEK_SET); } } /* We save/restore most things on demand, but always do mmaped files. */ static void save_mmapped_files(void) { struct failtest_call *i; trace("Saving mmapped files in child\n"); tlist_for_each_rev(&history, i, list) { struct mmap_call *m = &i->u.mmap; struct saved_mmapped_file *s; if (i->type != FAILTEST_MMAP) continue; /* FIXME: We only handle mmapped files where fd is still open. */ if (m->opener->u.open.closed) continue; s = malloc(sizeof *s); s->s = save_contents(m->opener->u.open.pathname, m->fd, m->length, m->offset, "mmapped file before fork"); s->opener = m->opener; s->next = saved_mmapped_files; saved_mmapped_files = s; } } static void free_mmapped_files(bool restore) { trace("%s mmapped files in child\n", restore ? 
"Restoring" : "Discarding"); while (saved_mmapped_files) { struct saved_mmapped_file *next = saved_mmapped_files->next; if (restore) restore_contents(saved_mmapped_files->opener, saved_mmapped_files->s, false, "saved mmap"); free(saved_mmapped_files->s); free(saved_mmapped_files); saved_mmapped_files = next; } } /* Returns a FAILTEST_OPEN, FAILTEST_PIPE or NULL. */ static struct failtest_call *opener_of(int fd) { struct failtest_call *i; /* Don't get confused and match genuinely failed opens. */ if (fd < 0) return NULL; /* Figure out the set of live fds. */ tlist_for_each_rev(&history, i, list) { if (i->fail) continue; switch (i->type) { case FAILTEST_CLOSE: if (i->u.close.fd == fd) { return NULL; } break; case FAILTEST_OPEN: if (i->u.open.ret == fd) { if (i->u.open.closed) return NULL; return i; } break; case FAILTEST_PIPE: if (i->u.pipe.fds[0] == fd || i->u.pipe.fds[1] == fd) { return i; } break; default: break; } } /* FIXME: socket, dup, etc are untracked! */ return NULL; } static void free_call(struct failtest_call *call) { /* We don't do this in cleanup: needed even for failed opens. */ if (call->type == FAILTEST_OPEN) free((char *)call->u.open.pathname); free(call->backtrace); tlist_del_from(&history, call, list); free(call); } /* Free up memory, so valgrind doesn't report leaks. */ static void free_everything(void) { struct failtest_call *i; while ((i = tlist_top(&history, list)) != NULL) free_call(i); failtable_clear(&failtable); } static NORETURN void failtest_cleanup(bool forced_cleanup, int status) { struct failtest_call *i; bool restore = true; /* For children, we don't care if they "failed" the testing. */ if (control_fd != -1) status = 0; else /* We don't restore contents for original parent. */ restore = false; /* Cleanup everything, in reverse order. */ tlist_for_each_rev(&history, i, list) { /* Don't restore things our parent did. 
*/ if (i == our_history_start) restore = false; if (i->fail) continue; if (i->cleanup) i->cleanup(&i->u, restore); /* But their program shouldn't leak, even on failure. */ if (!forced_cleanup && i->can_leak) { printf("Leak at %s:%u: --failpath=%s\n", i->file, i->line, failpath_string()); status = 1; } } /* Put back mmaped files the way our parent (if any) expects. */ free_mmapped_files(true); free_everything(); if (status == 0) tell_parent(SUCCESS); else tell_parent(FAILURE); exit(status); } static bool following_path(void) { if (!failpath) return false; /* + means continue after end, like normal. */ if (*failpath == '+') { failpath = NULL; return false; } return true; } static bool follow_path(struct failtest_call *call) { if (*failpath == '\0') { /* Continue, but don't inject errors. */ return call->fail = false; } if (tolower((unsigned char)*failpath) != info_to_arg[call->type]) errx(1, "Failpath expected '%s' got '%c'\n", failpath, info_to_arg[call->type]); call->fail = cisupper(*(failpath++)); if (call->fail) call->can_leak = false; return call->fail; } static bool should_fail(struct failtest_call *call) { int status; int control[2], output[2]; enum info_type type = UNEXPECTED; char *out = NULL; size_t outlen = 0; struct failtest_call *dup; if (call == &unrecorded_call) return false; if (following_path()) return follow_path(call); /* Attach debugger if they asked for it. */ if (debugpath) { char *path; /* Pretend this last call matches whatever path wanted: * keeps valgrind happy. */ call->fail = cisupper(debugpath[strlen(debugpath)-1]); path = failpath_string(); if (streq(path, debugpath)) { char str[80]; /* Don't timeout. */ signal(SIGUSR1, SIG_IGN); sprintf(str, "xterm -e gdb /proc/%u/exe %u &", (unsigned int)getpid(), (unsigned int)getpid()); if (system(str) == 0) sleep(5); } else { /* Ignore last character: could be upper or lower. 
*/ path[strlen(path)-1] = '\0'; if (!strstarts(debugpath, path)) { fprintf(stderr, "--debugpath not followed: %s\n", path); debugpath = NULL; } } free(path); } /* Are we probing? If so, we never fail twice. */ if (probing) { trace("Not failing %c due to FAIL_PROBE return\n", info_to_arg[call->type]); return call->fail = false; } /* Don't fail more than once in the same place. */ dup = failtable_get(&failtable, call); if (dup) { trace("Not failing %c due to duplicate\n", info_to_arg[call->type]); return call->fail = false; } if (failtest_hook) { switch (failtest_hook(&history)) { case FAIL_OK: break; case FAIL_PROBE: probing = true; break; case FAIL_DONT_FAIL: trace("Not failing %c due to failhook return\n", info_to_arg[call->type]); call->fail = false; return false; default: abort(); } } /* Add it to our table of calls. */ failtable_add(&failtable, call); /* We're going to fail in the child. */ call->fail = true; if (pipe(control) != 0 || pipe(output) != 0) err(1, "opening pipe"); /* Move out the way, to high fds. */ control[0] = move_fd_to_high(control[0]); control[1] = move_fd_to_high(control[1]); output[0] = move_fd_to_high(output[0]); output[1] = move_fd_to_high(output[1]); /* Prevent double-printing (in child and parent) */ fflush(stdout); fflush(warnf); if (tracef) fflush(tracef); child = fork(); if (child == -1) err(1, "forking failed"); if (child == 0) { traceindent++; if (tracef) { struct timeval diff; const char *p; char *failpath; struct failtest_call *c; c = tlist_tail(&history, list); diff = time_sub(time_now(), start); failpath = failpath_string(); p = strrchr(c->file, '/'); if (p) p++; else p = c->file; trace("%u->%u (%u.%02u): %s (%s:%u)\n", (unsigned int)getppid(), (unsigned int)getpid(), (int)diff.tv_sec, (int)diff.tv_usec / 10000, failpath, p, c->line); free(failpath); } /* From here on, we have to clean up! */ our_history_start = tlist_tail(&history, list); close(control[0]); close(output[0]); /* Don't swallow stderr if we're tracing. 
*/ if (!tracef) { dup2(output[1], STDOUT_FILENO); dup2(output[1], STDERR_FILENO); if (output[1] != STDOUT_FILENO && output[1] != STDERR_FILENO) close(output[1]); } control_fd = move_fd_to_high(control[1]); /* Forget any of our parent's saved files. */ free_mmapped_files(false); /* Now, save any files we need to. */ save_mmapped_files(); /* Failed calls can't leak. */ call->can_leak = false; return true; } signal(SIGUSR1, hand_down); close(control[1]); close(output[1]); /* We grab output so we can display it; we grab writes so we * can compare. */ do { struct pollfd pfd[2]; int ret; pfd[0].fd = output[0]; pfd[0].events = POLLIN|POLLHUP; pfd[1].fd = control[0]; pfd[1].events = POLLIN|POLLHUP; if (type == SUCCESS) ret = poll(pfd, 1, failtest_timeout_ms); else ret = poll(pfd, 2, failtest_timeout_ms); if (ret == 0) hand_down(SIGUSR1); if (ret < 0) { if (errno == EINTR) continue; err(1, "Poll returned %i", ret); } if (pfd[0].revents & POLLIN) { ssize_t len; out = realloc(out, outlen + 8192); len = read(output[0], out + outlen, 8192); outlen += len; } else if (type != SUCCESS && (pfd[1].revents & POLLIN)) { if (read_all(control[0], &type, sizeof(type))) { if (type == WRITE) { if (!read_write_info(control[0])) break; } else if (type == RELEASE_LOCKS) { release_locks(); /* FIXME: Tell them we're done... */ } } } else if (pfd[0].revents & POLLHUP) { break; } } while (type != FAILURE); close(output[0]); close(control[0]); waitpid(child, &status, 0); if (!WIFEXITED(status)) { if (WTERMSIG(status) == SIGUSR1) child_fail(out, outlen, "Timed out"); else child_fail(out, outlen, "Killed by signal %u: ", WTERMSIG(status)); } /* Child printed failure already, just pass up exit code. */ if (type == FAILURE) { fprintf(stderr, "%.*s", (int)outlen, out); tell_parent(type); exit(WEXITSTATUS(status) ? 
WEXITSTATUS(status) : 1); } if (WEXITSTATUS(status) != 0) child_fail(out, outlen, "Exited with status %i: ", WEXITSTATUS(status)); free(out); signal(SIGUSR1, SIG_DFL); /* Only child does probe. */ probing = false; /* We continue onwards without failing. */ call->fail = false; return false; } static void cleanup_calloc(struct calloc_call *call, bool restore) { trace("undoing calloc %p\n", call->ret); free(call->ret); } void *failtest_calloc(size_t nmemb, size_t size, const char *file, unsigned line) { struct failtest_call *p; struct calloc_call call; call.nmemb = nmemb; call.size = size; p = add_history(FAILTEST_CALLOC, true, file, line, &call); if (should_fail(p)) { p->u.calloc.ret = NULL; p->error = ENOMEM; } else { p->u.calloc.ret = calloc(nmemb, size); set_cleanup(p, cleanup_calloc, struct calloc_call); } trace("calloc %zu x %zu %s:%u -> %p\n", nmemb, size, file, line, p->u.calloc.ret); errno = p->error; return p->u.calloc.ret; } static void cleanup_malloc(struct malloc_call *call, bool restore) { trace("undoing malloc %p\n", call->ret); free(call->ret); } void *failtest_malloc(size_t size, const char *file, unsigned line) { struct failtest_call *p; struct malloc_call call; call.size = size; p = add_history(FAILTEST_MALLOC, true, file, line, &call); if (should_fail(p)) { p->u.malloc.ret = NULL; p->error = ENOMEM; } else { p->u.malloc.ret = malloc(size); set_cleanup(p, cleanup_malloc, struct malloc_call); } trace("malloc %zu %s:%u -> %p\n", size, file, line, p->u.malloc.ret); errno = p->error; return p->u.malloc.ret; } static void cleanup_realloc(struct realloc_call *call, bool restore) { trace("undoing realloc %p\n", call->ret); free(call->ret); } /* Walk back and find out if we got this ptr from a previous routine. */ static void fixup_ptr_history(void *ptr, const char *why) { struct failtest_call *i; /* Start at end of history, work back. 
*/ tlist_for_each_rev(&history, i, list) { switch (i->type) { case FAILTEST_REALLOC: if (i->u.realloc.ret == ptr) { trace("found realloc %p %s:%u matching %s\n", ptr, i->file, i->line, why); i->cleanup = NULL; i->can_leak = false; return; } break; case FAILTEST_MALLOC: if (i->u.malloc.ret == ptr) { trace("found malloc %p %s:%u matching %s\n", ptr, i->file, i->line, why); i->cleanup = NULL; i->can_leak = false; return; } break; case FAILTEST_CALLOC: if (i->u.calloc.ret == ptr) { trace("found calloc %p %s:%u matching %s\n", ptr, i->file, i->line, why); i->cleanup = NULL; i->can_leak = false; return; } break; default: break; } } trace("Did not find %p matching %s\n", ptr, why); } void *failtest_realloc(void *ptr, size_t size, const char *file, unsigned line) { struct failtest_call *p; struct realloc_call call; call.size = size; p = add_history(FAILTEST_REALLOC, true, file, line, &call); /* FIXME: Try one child moving allocation, one not. */ if (should_fail(p)) { p->u.realloc.ret = NULL; p->error = ENOMEM; } else { /* Don't catch this one in the history fixup... */ p->u.realloc.ret = NULL; fixup_ptr_history(ptr, "realloc"); p->u.realloc.ret = realloc(ptr, size); set_cleanup(p, cleanup_realloc, struct realloc_call); } trace("realloc %p %s:%u -> %p\n", ptr, file, line, p->u.realloc.ret); errno = p->error; return p->u.realloc.ret; } /* FIXME: Record free, so we can terminate fixup_ptr_history correctly. * If there's an alloc we don't see, it could get confusing if it matches * a previous allocation we did see. */ void failtest_free(void *ptr) { fixup_ptr_history(ptr, "free"); trace("free %p\n", ptr); free(ptr); } static struct contents_saved *save_file(const char *pathname) { int fd; struct contents_saved *s; fd = open(pathname, O_RDONLY); if (fd < 0) return NULL; s = save_contents(pathname, fd, lseek(fd, 0, SEEK_END), 0, "open with O_TRUNC"); close(fd); return s; } /* Optimization: don't create a child for an open which *we know* * would fail anyway. 
*/ static bool open_would_fail(const char *pathname, int flags) { if ((flags & O_ACCMODE) == O_RDONLY) return access(pathname, R_OK) != 0; if (!(flags & O_CREAT)) { if ((flags & O_ACCMODE) == O_WRONLY) return access(pathname, W_OK) != 0; if ((flags & O_ACCMODE) == O_RDWR) return access(pathname, W_OK) != 0 || access(pathname, R_OK) != 0; } /* FIXME: We could check if it exists, for O_CREAT|O_EXCL */ return false; } static void cleanup_open(struct open_call *call, bool restore) { if (restore && call->saved) restore_contents(container_of(call, struct failtest_call, u.open), call->saved, false, "open with O_TRUNC"); if (!call->closed) { trace("Cleaning up open %s by closing fd %i\n", call->pathname, call->ret); close(call->ret); call->closed = true; } free(call->saved); } int failtest_open(const char *pathname, const char *file, unsigned line, ...) { struct failtest_call *p; struct open_call call; va_list ap; call.pathname = strdup(pathname); va_start(ap, line); call.flags = va_arg(ap, int); call.always_save = false; call.closed = false; if (call.flags & O_CREAT) { call.mode = va_arg(ap, int); va_end(ap); } p = add_history(FAILTEST_OPEN, true, file, line, &call); /* Avoid memory leak! */ if (p == &unrecorded_call) free((char *)call.pathname); if (should_fail(p)) { /* Don't bother inserting failures that would happen anyway. */ if (open_would_fail(pathname, call.flags)) { trace("Open would have failed anyway: stopping\n"); failtest_cleanup(true, 0); } p->u.open.ret = -1; /* FIXME: Play with error codes? */ p->error = EACCES; } else { /* Save the old version if they're truncating it. 
*/ if (call.flags & O_TRUNC) p->u.open.saved = save_file(pathname); else p->u.open.saved = NULL; p->u.open.ret = open(pathname, call.flags, call.mode); if (p->u.open.ret == -1) { p->u.open.closed = true; p->can_leak = false; } else { set_cleanup(p, cleanup_open, struct open_call); } } trace("open %s %s:%u -> %i (opener %p)\n", pathname, file, line, p->u.open.ret, &p->u.open); errno = p->error; return p->u.open.ret; } static void cleanup_mmap(struct mmap_call *mmap, bool restore) { trace("cleaning up mmap @%p (opener %p)\n", mmap->ret, mmap->opener); if (restore) restore_contents(mmap->opener, mmap->saved, false, "mmap"); free(mmap->saved); } void *failtest_mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset, const char *file, unsigned line) { struct failtest_call *p; struct mmap_call call; call.addr = addr; call.length = length; call.prot = prot; call.flags = flags; call.offset = offset; call.fd = fd; call.opener = opener_of(fd); /* If we don't know what file it was, don't fail. */ if (!call.opener) { if (fd != -1) { fwarnx("failtest_mmap: couldn't figure out source for" " fd %i at %s:%u", fd, file, line); } addr = mmap(addr, length, prot, flags, fd, offset); trace("mmap of fd %i -> %p (opener = NULL)\n", fd, addr); return addr; } p = add_history(FAILTEST_MMAP, false, file, line, &call); if (should_fail(p)) { p->u.mmap.ret = MAP_FAILED; p->error = ENOMEM; } else { p->u.mmap.ret = mmap(addr, length, prot, flags, fd, offset); /* Save contents if we're writing to a normal file */ if (p->u.mmap.ret != MAP_FAILED && (prot & PROT_WRITE) && call.opener->type == FAILTEST_OPEN) { const char *fname = call.opener->u.open.pathname; p->u.mmap.saved = save_contents(fname, fd, length, offset, "being mmapped"); set_cleanup(p, cleanup_mmap, struct mmap_call); } } trace("mmap of fd %i %s:%u -> %p (opener = %p)\n", fd, file, line, addr, call.opener); errno = p->error; return p->u.mmap.ret; } /* Since OpenBSD can't handle adding args, we use this file and line. 
* This will make all mmaps look the same, reducing coverage. */ void *failtest_mmap_noloc(void *addr, size_t length, int prot, int flags, int fd, off_t offset) { return failtest_mmap(addr, length, prot, flags, fd, offset, __FILE__, __LINE__); } static void cleanup_pipe(struct pipe_call *call, bool restore) { trace("cleaning up pipe fd=%i%s,%i%s\n", call->fds[0], call->closed[0] ? "(already closed)" : "", call->fds[1], call->closed[1] ? "(already closed)" : ""); if (!call->closed[0]) close(call->fds[0]); if (!call->closed[1]) close(call->fds[1]); } int failtest_pipe(int pipefd[2], const char *file, unsigned line) { struct failtest_call *p; struct pipe_call call; p = add_history(FAILTEST_PIPE, true, file, line, &call); if (should_fail(p)) { p->u.open.ret = -1; /* FIXME: Play with error codes? */ p->error = EMFILE; } else { p->u.pipe.ret = pipe(p->u.pipe.fds); p->u.pipe.closed[0] = p->u.pipe.closed[1] = false; set_cleanup(p, cleanup_pipe, struct pipe_call); } trace("pipe %s:%u -> %i,%i\n", file, line, p->u.pipe.ret ? -1 : p->u.pipe.fds[0], p->u.pipe.ret ? -1 : p->u.pipe.fds[1]); /* This causes valgrind to notice if they use pipefd[] after failure */ memcpy(pipefd, p->u.pipe.fds, sizeof(p->u.pipe.fds)); errno = p->error; return p->u.pipe.ret; } static void cleanup_read(struct read_call *call, bool restore) { if (restore) { trace("cleaning up read on fd %i: seeking to %llu\n", call->fd, (long long)call->off); /* Read (not readv!) moves file offset! */ if (lseek(call->fd, call->off, SEEK_SET) != call->off) { fwarn("Restoring lseek pointer failed (read)"); } } } static ssize_t failtest_add_read(int fd, void *buf, size_t count, off_t off, bool is_pread, const char *file, unsigned line) { struct failtest_call *p; struct read_call call; call.fd = fd; call.buf = buf; call.count = count; call.off = off; p = add_history(FAILTEST_READ, false, file, line, &call); /* FIXME: Try partial read returns. 
*/ if (should_fail(p)) { p->u.read.ret = -1; p->error = EIO; } else { if (is_pread) p->u.read.ret = pread(fd, buf, count, off); else { p->u.read.ret = read(fd, buf, count); if (p->u.read.ret != -1) set_cleanup(p, cleanup_read, struct read_call); } } trace("%sread %s:%u fd %i %zu@%llu -> %zd\n", is_pread ? "p" : "", file, line, fd, count, (long long)off, p->u.read.ret); errno = p->error; return p->u.read.ret; } static void cleanup_write(struct write_call *write, bool restore) { trace("cleaning up write on %s\n", write->opener->u.open.pathname); if (restore) restore_contents(write->opener, write->saved, !write->is_pwrite, "write"); free(write->saved); } static ssize_t failtest_add_write(int fd, const void *buf, size_t count, off_t off, bool is_pwrite, const char *file, unsigned line) { struct failtest_call *p; struct write_call call; call.fd = fd; call.buf = buf; call.count = count; call.off = off; call.is_pwrite = is_pwrite; call.opener = opener_of(fd); p = add_history(FAILTEST_WRITE, false, file, line, &call); /* If we're a child, we need to make sure we write the same thing * to non-files as the parent does, so tell it. */ if (control_fd != -1 && off == (off_t)-1) { enum info_type type = WRITE; write_all(control_fd, &type, sizeof(type)); write_all(control_fd, &p->u.write, sizeof(p->u.write)); write_all(control_fd, buf, count); } /* FIXME: Try partial write returns. */ if (should_fail(p)) { p->u.write.ret = -1; p->error = EIO; } else { bool is_file; assert(call.opener == p->u.write.opener); if (p->u.write.opener) { is_file = (p->u.write.opener->type == FAILTEST_OPEN); } else { /* We can't unwind it, so at least check same * in parent and child. 
*/ is_file = false; } /* FIXME: We assume same write order in parent and child */ if (!is_file && child_writes_num != 0) { if (child_writes[0].fd != fd) errx(1, "Child wrote to fd %u, not %u?", child_writes[0].fd, fd); if (child_writes[0].off != p->u.write.off) errx(1, "Child wrote to offset %zu, not %zu?", (size_t)child_writes[0].off, (size_t)p->u.write.off); if (child_writes[0].count != count) errx(1, "Child wrote length %zu, not %zu?", child_writes[0].count, count); if (memcmp(child_writes[0].buf, buf, count)) { child_fail(NULL, 0, "Child wrote differently to" " fd %u than we did!\n", fd); } free((char *)child_writes[0].buf); child_writes_num--; memmove(&child_writes[0], &child_writes[1], sizeof(child_writes[0]) * child_writes_num); /* Child wrote it already. */ trace("write %s:%i on fd %i already done by child\n", file, line, fd); p->u.write.ret = count; errno = p->error; return p->u.write.ret; } if (is_file) { p->u.write.saved = save_contents(call.opener->u.open.pathname, fd, count, off, "being overwritten"); set_cleanup(p, cleanup_write, struct write_call); } /* Though off is current seek ptr for write case, we need to * move it. write() does that for us. */ if (p->u.write.is_pwrite) p->u.write.ret = pwrite(fd, buf, count, off); else p->u.write.ret = write(fd, buf, count); } trace("%swrite %s:%i %zu@%llu on fd %i -> %zd\n", p->u.write.is_pwrite ? 
"p" : "", file, line, count, (long long)off, fd, p->u.write.ret); errno = p->error; return p->u.write.ret; } ssize_t failtest_pwrite(int fd, const void *buf, size_t count, off_t offset, const char *file, unsigned line) { return failtest_add_write(fd, buf, count, offset, true, file, line); } ssize_t failtest_write(int fd, const void *buf, size_t count, const char *file, unsigned line) { return failtest_add_write(fd, buf, count, lseek(fd, 0, SEEK_CUR), false, file, line); } ssize_t failtest_pread(int fd, void *buf, size_t count, off_t off, const char *file, unsigned line) { return failtest_add_read(fd, buf, count, off, true, file, line); } ssize_t failtest_read(int fd, void *buf, size_t count, const char *file, unsigned line) { return failtest_add_read(fd, buf, count, lseek(fd, 0, SEEK_CUR), false, file, line); } static struct lock_info *WARN_UNUSED_RESULT add_lock(struct lock_info *locks, int fd, off_t start, off_t end, int type) { unsigned int i; struct lock_info *l; for (i = 0; i < lock_num; i++) { l = &locks[i]; if (l->fd != fd) continue; /* Four cases we care about: * Start overlap: * l = | | * new = | | * Mid overlap: * l = | | * new = | | * End overlap: * l = | | * new = | | * Total overlap: * l = | | * new = | | */ if (start > l->start && end < l->end) { /* Mid overlap: trim entry, add new one. */ off_t new_start, new_end; new_start = end + 1; new_end = l->end; trace("splitting lock on fd %i from %llu-%llu" " to %llu-%llu\n", fd, (long long)l->start, (long long)l->end, (long long)l->start, (long long)start - 1); l->end = start - 1; locks = add_lock(locks, fd, new_start, new_end, l->type); l = &locks[i]; } else if (start <= l->start && end >= l->end) { /* Total overlap: eliminate entry. 
*/ trace("erasing lock on fd %i %llu-%llu\n", fd, (long long)l->start, (long long)l->end); l->end = 0; l->start = 1; } else if (end >= l->start && end < l->end) { trace("trimming lock on fd %i from %llu-%llu" " to %llu-%llu\n", fd, (long long)l->start, (long long)l->end, (long long)end + 1, (long long)l->end); /* Start overlap: trim entry. */ l->start = end + 1; } else if (start > l->start && start <= l->end) { trace("trimming lock on fd %i from %llu-%llu" " to %llu-%llu\n", fd, (long long)l->start, (long long)l->end, (long long)l->start, (long long)start - 1); /* End overlap: trim entry. */ l->end = start-1; } /* Nothing left? Remove it. */ if (l->end < l->start) { trace("forgetting lock on fd %i\n", fd); memmove(l, l + 1, (--lock_num - i) * sizeof(l[0])); i--; } } if (type != F_UNLCK) { locks = realloc(locks, (lock_num + 1) * sizeof(*locks)); l = &locks[lock_num++]; l->fd = fd; l->start = start; l->end = end; l->type = type; trace("new lock on fd %i %llu-%llu\n", fd, (long long)l->start, (long long)l->end); } return locks; } /* We trap this so we can record it: we don't fail it. */ int failtest_close(int fd, const char *file, unsigned line) { struct close_call call; struct failtest_call *p, *opener; /* Do this before we add ourselves to history! */ opener = opener_of(fd); call.fd = fd; p = add_history(FAILTEST_CLOSE, false, file, line, &call); p->fail = false; /* Consume close from failpath (shouldn't tell us to fail). */ if (following_path()) { if (follow_path(p)) abort(); } trace("close on fd %i\n", fd); if (fd < 0) return close(fd); /* Mark opener as not leaking, remove its cleanup function. */ if (opener) { trace("close on fd %i found opener %p\n", fd, opener); if (opener->type == FAILTEST_PIPE) { /* From a pipe? 
*/ if (opener->u.pipe.fds[0] == fd) { assert(!opener->u.pipe.closed[0]); opener->u.pipe.closed[0] = true; } else if (opener->u.pipe.fds[1] == fd) { assert(!opener->u.pipe.closed[1]); opener->u.pipe.closed[1] = true; } else abort(); opener->can_leak = (!opener->u.pipe.closed[0] || !opener->u.pipe.closed[1]); } else if (opener->type == FAILTEST_OPEN) { opener->u.open.closed = true; opener->can_leak = false; } else abort(); } /* Restore offset now, in case parent shared (can't do after close!). */ if (control_fd != -1) { struct failtest_call *i; tlist_for_each_rev(&history, i, list) { if (i == our_history_start) break; if (i == opener) break; if (i->type == FAILTEST_LSEEK && i->u.lseek.fd == fd) { trace("close on fd %i undoes lseek\n", fd); /* This seeks back. */ i->cleanup(&i->u, true); i->cleanup = NULL; } else if (i->type == FAILTEST_WRITE && i->u.write.fd == fd && !i->u.write.is_pwrite) { trace("close on fd %i undoes write" " offset change\n", fd); /* Write (not pwrite!) moves file offset! */ if (lseek(fd, i->u.write.off, SEEK_SET) != i->u.write.off) { fwarn("Restoring lseek pointer failed (write)"); } } else if (i->type == FAILTEST_READ && i->u.read.fd == fd) { /* preads don't *have* cleanups */ if (i->cleanup) { trace("close on fd %i undoes read" " offset change\n", fd); /* This seeks back. */ i->cleanup(&i->u, true); i->cleanup = NULL; } } } } /* Close unlocks everything. */ locks = add_lock(locks, fd, 0, off_max(), F_UNLCK); return close(fd); } /* Zero length means "to end of file" */ static off_t end_of(off_t start, off_t len) { if (len == 0) return off_max(); return start + len - 1; } /* FIXME: This only handles locks, really. */ int failtest_fcntl(int fd, const char *file, unsigned line, int cmd, ...) { struct failtest_call *p; struct fcntl_call call; va_list ap; call.fd = fd; call.cmd = cmd; /* Argument extraction. 
*/ switch (cmd) { case F_SETFL: case F_SETFD: va_start(ap, cmd); call.arg.l = va_arg(ap, long); va_end(ap); trace("fcntl on fd %i F_SETFL/F_SETFD\n", fd); return fcntl(fd, cmd, call.arg.l); case F_GETFD: case F_GETFL: trace("fcntl on fd %i F_GETFL/F_GETFD\n", fd); return fcntl(fd, cmd); case F_GETLK: trace("fcntl on fd %i F_GETLK\n", fd); get_locks(); va_start(ap, cmd); call.arg.fl = *va_arg(ap, struct flock *); va_end(ap); return fcntl(fd, cmd, &call.arg.fl); case F_SETLK: case F_SETLKW: trace("fcntl on fd %i F_SETLK%s\n", fd, cmd == F_SETLKW ? "W" : ""); va_start(ap, cmd); call.arg.fl = *va_arg(ap, struct flock *); va_end(ap); break; default: /* This means you need to implement it here. */ err(1, "failtest: unknown fcntl %u", cmd); } p = add_history(FAILTEST_FCNTL, false, file, line, &call); if (should_fail(p)) { p->u.fcntl.ret = -1; if (p->u.fcntl.cmd == F_SETLK) p->error = EAGAIN; else p->error = EDEADLK; } else { get_locks(); p->u.fcntl.ret = fcntl(p->u.fcntl.fd, p->u.fcntl.cmd, &p->u.fcntl.arg.fl); if (p->u.fcntl.ret == -1) p->error = errno; else { /* We don't handle anything else yet. */ assert(p->u.fcntl.arg.fl.l_whence == SEEK_SET); locks = add_lock(locks, p->u.fcntl.fd, p->u.fcntl.arg.fl.l_start, end_of(p->u.fcntl.arg.fl.l_start, p->u.fcntl.arg.fl.l_len), p->u.fcntl.arg.fl.l_type); } } trace("fcntl on fd %i -> %i\n", fd, p->u.fcntl.ret); errno = p->error; return p->u.fcntl.ret; } static void cleanup_lseek(struct lseek_call *call, bool restore) { if (restore) { trace("cleaning up lseek on fd %i -> %llu\n", call->fd, (long long)call->old_off); if (lseek(call->fd, call->old_off, SEEK_SET) != call->old_off) fwarn("Restoring lseek pointer failed"); } } /* We trap this so we can undo it: we don't fail it. 
*/ off_t failtest_lseek(int fd, off_t offset, int whence, const char *file, unsigned int line) { struct failtest_call *p; struct lseek_call call; call.fd = fd; call.offset = offset; call.whence = whence; call.old_off = lseek(fd, 0, SEEK_CUR); p = add_history(FAILTEST_LSEEK, false, file, line, &call); p->fail = false; /* Consume lseek from failpath. */ if (failpath) if (should_fail(p)) abort(); p->u.lseek.ret = lseek(fd, offset, whence); if (p->u.lseek.ret != (off_t)-1) set_cleanup(p, cleanup_lseek, struct lseek_call); trace("lseek %s:%u on fd %i from %llu to %llu%s\n", file, line, fd, (long long)call.old_off, (long long)offset, whence == SEEK_CUR ? " (from current off)" : whence == SEEK_END ? " (from end)" : whence == SEEK_SET ? "" : " (invalid whence)"); return p->u.lseek.ret; } pid_t failtest_getpid(const char *file, unsigned line) { /* You must call failtest_init first! */ assert(orig_pid); return orig_pid; } void failtest_init(int argc, char *argv[]) { unsigned int i; orig_pid = getpid(); warnf = fdopen(move_fd_to_high(dup(STDERR_FILENO)), "w"); for (i = 1; i < argc; i++) { if (!strncmp(argv[i], "--failpath=", strlen("--failpath="))) { failpath = argv[i] + strlen("--failpath="); } else if (strcmp(argv[i], "--trace") == 0) { tracef = warnf; failtest_timeout_ms = -1; } else if (!strncmp(argv[i], "--debugpath=", strlen("--debugpath="))) { debugpath = argv[i] + strlen("--debugpath="); } } failtable_init(&failtable); start = time_now(); } bool failtest_has_failed(void) { return control_fd != -1; } void failtest_exit(int status) { trace("failtest_exit with status %i\n", status); if (failtest_exit_check) { if (!failtest_exit_check(&history)) child_fail(NULL, 0, "failtest_exit_check failed\n"); } failtest_cleanup(false, status); } ntdb-1.0/lib/ccan/failtest/failtest.d000066400000000000000000000072651224151530700175600ustar00rootroot00000000000000ccan/failtest/failtest.o: ccan/failtest/failtest.c \ ccan/failtest/failtest.h config.h \ 
/usr/include/i386-linux-gnu/sys/types.h /usr/include/features.h \ /usr/include/i386-linux-gnu/bits/predefs.h \ /usr/include/i386-linux-gnu/sys/cdefs.h \ /usr/include/i386-linux-gnu/bits/wordsize.h \ /usr/include/i386-linux-gnu/gnu/stubs.h \ /usr/include/i386-linux-gnu/gnu/stubs-32.h \ /usr/include/i386-linux-gnu/bits/types.h \ /usr/include/i386-linux-gnu/bits/typesizes.h /usr/include/time.h \ /usr/lib/gcc/i686-linux-gnu/4.5.4/include/stddef.h /usr/include/endian.h \ /usr/include/i386-linux-gnu/bits/endian.h \ /usr/include/i386-linux-gnu/bits/byteswap.h \ /usr/include/i386-linux-gnu/sys/select.h \ /usr/include/i386-linux-gnu/bits/select.h \ /usr/include/i386-linux-gnu/bits/sigset.h \ /usr/include/i386-linux-gnu/bits/time.h \ /usr/include/i386-linux-gnu/sys/sysmacros.h \ /usr/include/i386-linux-gnu/bits/pthreadtypes.h \ /usr/lib/gcc/i686-linux-gnu/4.5.4/include/stdbool.h /usr/include/fcntl.h \ /usr/include/i386-linux-gnu/bits/fcntl.h \ /usr/include/i386-linux-gnu/bits/uio.h \ /usr/include/i386-linux-gnu/bits/stat.h ccan/compiler/compiler.h \ ccan/tlist/tlist.h ccan/list/list.h /usr/include/assert.h \ ccan/container_of/container_of.h ccan/check_type/check_type.h \ ccan/tcon/tcon.h /usr/lib/gcc/i686-linux-gnu/4.5.4/include/stdarg.h \ /usr/include/string.h /usr/include/xlocale.h /usr/include/stdio.h \ /usr/include/libio.h /usr/include/_G_config.h /usr/include/wchar.h \ /usr/include/i386-linux-gnu/bits/stdio_lim.h \ /usr/include/i386-linux-gnu/bits/sys_errlist.h /usr/include/ctype.h \ /usr/include/err.h /usr/include/unistd.h \ /usr/include/i386-linux-gnu/bits/posix_opt.h \ /usr/include/i386-linux-gnu/bits/environments.h \ /usr/include/i386-linux-gnu/bits/confname.h /usr/include/getopt.h \ /usr/include/poll.h /usr/include/i386-linux-gnu/sys/poll.h \ /usr/include/i386-linux-gnu/bits/poll.h /usr/include/errno.h \ /usr/include/i386-linux-gnu/bits/errno.h /usr/include/linux/errno.h \ /usr/include/i386-linux-gnu/asm/errno.h /usr/include/asm-generic/errno.h \ 
/usr/include/asm-generic/errno-base.h \ /usr/include/i386-linux-gnu/sys/wait.h /usr/include/signal.h \ /usr/include/i386-linux-gnu/bits/signum.h \ /usr/include/i386-linux-gnu/bits/siginfo.h \ /usr/include/i386-linux-gnu/bits/sigaction.h \ /usr/include/i386-linux-gnu/bits/sigcontext.h \ /usr/include/i386-linux-gnu/asm/sigcontext.h /usr/include/linux/types.h \ /usr/include/i386-linux-gnu/asm/types.h /usr/include/asm-generic/types.h \ /usr/include/asm-generic/int-ll64.h \ /usr/include/i386-linux-gnu/asm/bitsperlong.h \ /usr/include/asm-generic/bitsperlong.h /usr/include/linux/posix_types.h \ /usr/include/linux/stddef.h \ /usr/include/i386-linux-gnu/asm/posix_types.h \ /usr/include/i386-linux-gnu/asm/posix_types_32.h \ /usr/include/i386-linux-gnu/bits/sigstack.h \ /usr/include/i386-linux-gnu/sys/ucontext.h \ /usr/include/i386-linux-gnu/bits/sigthread.h \ /usr/include/i386-linux-gnu/sys/resource.h \ /usr/include/i386-linux-gnu/bits/resource.h \ /usr/include/i386-linux-gnu/bits/waitflags.h \ /usr/include/i386-linux-gnu/bits/waitstatus.h \ /usr/include/i386-linux-gnu/sys/stat.h \ /usr/include/i386-linux-gnu/sys/time.h \ /usr/include/i386-linux-gnu/sys/mman.h \ /usr/include/i386-linux-gnu/bits/mman.h ccan/time/time.h \ /usr/lib/gcc/i686-linux-gnu/4.5.4/include/stdint.h /usr/include/stdint.h \ /usr/include/i386-linux-gnu/bits/wchar.h \ ccan/read_write_all/read_write_all.h ccan/failtest/failtest_proto.h \ /usr/include/stdlib.h /usr/include/alloca.h \ ccan/build_assert/build_assert.h ccan/hash/hash.h \ ccan/htable/htable_type.h ccan/htable/htable.h ccan/str/str.h \ ccan/str/str_debug.h /usr/include/execinfo.h ntdb-1.0/lib/ccan/failtest/failtest.h000066400000000000000000000133351224151530700175570ustar00rootroot00000000000000/* Licensed under LGPL - see LICENSE file for details */ #ifndef CCAN_FAILTEST_H #define CCAN_FAILTEST_H #include "config.h" #if HAVE_FILE_OFFSET_BITS #define _FILE_OFFSET_BITS 64 #endif #include #include #include #include #include /** * failtest_init - 
initialize the failtest module * @argc: the number of commandline arguments * @argv: the commandline argument array * * This initializes the module, and in particular if argv[1] is "--failpath=" * then it ensures that failures follow that pattern. This allows easy * debugging of complex failure paths. */ void failtest_init(int argc, char *argv[]); /** * failtest_exit - clean up and exit the test * @status: the status (usually exit_status() from ccan/tap). * * This cleans up and changes to files made in this child, and exits the test. * It also calls your failtest_default_hook, if any. * * A child which does not exit via failtest_exit() will cause the overall test * to fail. */ void NORETURN failtest_exit(int status); /** * enum failtest_call_type - discriminator for failtest_call.u */ enum failtest_call_type { FAILTEST_MALLOC, FAILTEST_CALLOC, FAILTEST_REALLOC, FAILTEST_OPEN, FAILTEST_CLOSE, FAILTEST_PIPE, FAILTEST_READ, FAILTEST_WRITE, FAILTEST_FCNTL, FAILTEST_MMAP, FAILTEST_LSEEK }; struct calloc_call { void *ret; size_t nmemb; size_t size; }; struct malloc_call { void *ret; size_t size; }; struct realloc_call { void *ret; void *ptr; size_t size; }; struct open_call { int ret; const char *pathname; int flags; mode_t mode; bool always_save; bool closed; /* This is used for O_TRUNC opens on existing files. 
*/ struct contents_saved *saved; }; struct close_call { int fd; }; struct pipe_call { int ret; int fds[2]; bool closed[2]; }; struct read_call { ssize_t ret; off_t off; int fd; void *buf; size_t count; }; struct write_call { ssize_t ret; int fd; const void *buf; size_t count; off_t off; bool is_pwrite; struct failtest_call *opener; struct contents_saved *saved; }; struct fcntl_call { int ret; int fd; int cmd; union { struct flock fl; long l; int i; } arg; }; struct mmap_call { void *ret; void *addr; size_t length; int prot; int flags; int fd; off_t offset; struct failtest_call *opener; struct contents_saved *saved; }; struct lseek_call { ssize_t ret; int fd; off_t offset; int whence; off_t old_off; }; /** * struct failtest_call - description of a call redirected to failtest module * @type: the call type * @file: the filename of the caller * @line: the line number of the caller * @fail: did this call fail * @error: the errno (if any) * @u: the union of call data * * This structure is used to represent the ordered history of calls. * * See Also: * failtest_hook, failtest_exit_check */ struct failtest_call { /* We're in the history list. */ struct ccan_list_node list; enum failtest_call_type type; /* Where we were called from. */ const char *file; unsigned int line; /* Did we fail? */ bool fail; /* What we set errno to. */ int error; /* How do we clean this up? */ void (*cleanup)(void *u, bool restore); /* Should their program have cleaned up? */ bool can_leak; /* Backtrace of call chain. */ void **backtrace; unsigned int backtrace_num; /* The actual call data. */ union { struct calloc_call calloc; struct malloc_call malloc; struct realloc_call realloc; struct open_call open; struct close_call close; struct pipe_call pipe; struct read_call read; struct write_call write; struct fcntl_call fcntl; struct mmap_call mmap; struct lseek_call lseek; } u; }; /* This defines struct tlist_calls. 
*/ TLIST_TYPE(calls, struct failtest_call); enum failtest_result { /* Yes try failing this call. */ FAIL_OK, /* No, don't try failing this call. */ FAIL_DONT_FAIL, /* Try failing this call but don't go too far down that path. */ FAIL_PROBE, }; /** * failtest_hook - whether a certain call should fail or not. * @history: the ordered history of all failtest calls. * * The default value of this hook is failtest_default_hook(), which returns * FAIL_OK (ie. yes, fail the call). * * You can override it, and avoid failing certain calls. The parameters * of the call (but not the return value(s)) will be filled in for the last * call. * * Example: * static enum failtest_result dont_fail_alloc(struct tlist_calls *history) * { * struct failtest_call *call; * call = tlist_tail(history, list); * if (call->type == FAILTEST_MALLOC * || call->type == FAILTEST_CALLOC * || call->type == FAILTEST_REALLOC) * return FAIL_DONT_FAIL; * return FAIL_OK; * } * ... * failtest_hook = dont_fail_alloc; */ extern enum failtest_result (*failtest_hook)(struct tlist_calls *history); /** * failtest_exit_check - hook for additional checks on a failed child. * @history: the ordered history of all failtest calls. * * Your program might have additional checks to do on failure, such as * check that a file is not corrupted, or than an error message has been * logged. * * If this returns false, the path to this failure will be printed and the * overall test will fail. */ extern bool (*failtest_exit_check)(struct tlist_calls *history); /** * failtest_has_failed - determine if a failure has occurred. * * Sometimes you want to exit immediately if you've experienced an * injected failure. This is useful when you have four separate tests * in your test suite, and you don't want to do the next one if you've * had a failure in a previous one. */ extern bool failtest_has_failed(void); /** * failtest_timeout_ms - how long to wait before killing child. * * Default is 20,000 (20 seconds). 
*/ extern unsigned int failtest_timeout_ms; #endif /* CCAN_FAILTEST_H */ ntdb-1.0/lib/ccan/failtest/failtest_override.h000066400000000000000000000045041224151530700214540ustar00rootroot00000000000000/* Licensed under LGPL - see LICENSE file for details */ #ifndef CCAN_FAILTEST_OVERRIDE_H #define CCAN_FAILTEST_OVERRIDE_H /* This file is included before the source file to test. */ #include "config.h" #if HAVE_FILE_OFFSET_BITS #define _FILE_OFFSET_BITS 64 #endif /* Replacement of allocators. */ #include #ifdef HAVE_MALLOC_H #include #endif #undef calloc #define calloc(nmemb, size) \ failtest_calloc((nmemb), (size), __FILE__, __LINE__) #undef malloc #define malloc(size) \ failtest_malloc((size), __FILE__, __LINE__) #undef realloc #define realloc(ptr, size) \ failtest_realloc((ptr), (size), __FILE__, __LINE__) #undef free #define free(ptr) \ failtest_free(ptr) /* Replacement of I/O. */ #include #include #include #include #include #undef open #define open(pathname, ...) \ failtest_open((pathname), __FILE__, __LINE__, __VA_ARGS__) #undef pipe #define pipe(pipefd) \ failtest_pipe((pipefd), __FILE__, __LINE__) #undef read #define read(fd, buf, count) \ failtest_read((fd), (buf), (count), __FILE__, __LINE__) #undef write #define write(fd, buf, count) \ failtest_write((fd), (buf), (count), __FILE__, __LINE__) #undef pread #define pread(fd, buf, count, off) \ failtest_pread((fd), (buf), (count), (off), __FILE__, __LINE__) #undef pwrite #define pwrite(fd, buf, count, off) \ failtest_pwrite((fd), (buf), (count), (off), __FILE__, __LINE__) #undef close #define close(fd) failtest_close(fd, __FILE__, __LINE__) #undef fcntl #define fcntl(fd, ...) failtest_fcntl((fd), __FILE__, __LINE__, __VA_ARGS__) #undef mmap /* OpenBSD doesn't idempotent-protect sys/mman.h, so we can't add args. 
*/ #ifdef __OpenBSD__ #define mmap(addr, length, prot, flags, fd, offset) \ failtest_mmap_noloc((addr), (length), (prot), (flags), (fd), (offset)) #else #define mmap(addr, length, prot, flags, fd, offset) \ failtest_mmap((addr), (length), (prot), (flags), (fd), (offset), \ __FILE__, __LINE__) #endif /* !__OpenBSD__ */ #undef lseek #define lseek(fd, offset, whence) \ failtest_lseek((fd), (offset), (whence), __FILE__, __LINE__) /* Replacement of getpid (since failtest will fork). */ #undef getpid #define getpid() failtest_getpid(__FILE__, __LINE__) #include #endif /* CCAN_FAILTEST_OVERRIDE_H */ ntdb-1.0/lib/ccan/failtest/failtest_proto.h000066400000000000000000000031301224151530700207720ustar00rootroot00000000000000/* Licensed under LGPL - see LICENSE file for details */ #ifndef CCAN_FAILTEST_PROTO_H #define CCAN_FAILTEST_PROTO_H #include /* Potentially-failing versions of routines; #defined in failtest.h */ void *failtest_calloc(size_t nmemb, size_t size, const char *file, unsigned line); void *failtest_malloc(size_t size, const char *file, unsigned line); void *failtest_realloc(void *ptr, size_t size, const char *file, unsigned line); void failtest_free(void *ptr); int failtest_open(const char *pathname, const char *file, unsigned line, ...); int failtest_pipe(int pipefd[2], const char *file, unsigned line); ssize_t failtest_read(int fd, void *buf, size_t count, const char *file, unsigned line); ssize_t failtest_write(int fd, const void *buf, size_t count, const char *file, unsigned line); ssize_t failtest_pread(int fd, void *buf, size_t count, off_t offset, const char *file, unsigned line); ssize_t failtest_pwrite(int fd, const void *buf, size_t count, off_t offset, const char *file, unsigned line); void *failtest_mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset, const char *file, unsigned line); void *failtest_mmap_noloc(void *addr, size_t length, int prot, int flags, int fd, off_t offset); off_t failtest_lseek(int fd, off_t offset, int 
whence, const char *file, unsigned line); int failtest_close(int fd, const char *file, unsigned line); int failtest_fcntl(int fd, const char *file, unsigned line, int cmd, ...); pid_t failtest_getpid(const char *file, unsigned line); #endif /* CCAN_FAILTEST_PROTO_H */ ntdb-1.0/lib/ccan/failtest/failtest_undo.h000066400000000000000000000022401224151530700205750ustar00rootroot00000000000000/* Licensed under LGPL - see LICENSE file for details */ #ifndef CCAN_FAILTEST_RESTORE_H #define CCAN_FAILTEST_RESTORE_H /* This file undoes the effect of failtest_override.h. */ #undef calloc #define calloc(nmemb, size) \ failtest_calloc((nmemb), (size), NULL, 0) #undef malloc #define malloc(size) \ failtest_malloc((size), NULL, 0) #undef realloc #define realloc(ptr, size) \ failtest_realloc((ptr), (size), NULL, 0) #undef open #define open(pathname, ...) \ failtest_open((pathname), NULL, 0, __VA_ARGS__) #undef pipe #define pipe(pipefd) \ failtest_pipe((pipefd), NULL, 0) #undef read #define read(fd, buf, count) \ failtest_read((fd), (buf), (count), NULL, 0) #undef write #define write(fd, buf, count) \ failtest_write((fd), (buf), (count), NULL, 0) #undef mmap #define mmap(addr, length, prot, flags, fd, offset) \ failtest_mmap((addr), (length), (prot), (flags), (fd), (offset), NULL, 0) #undef lseek #define lseek(fd, off, whence) \ failtest_lseek((fd), (off), (whence), NULL, 0) #undef close #define close(fd) failtest_close(fd) #undef fcntl #define fcntl(fd, ...) 
\ failtest_fcntl((fd), NULL, 0, __VA_ARGS__) #endif /* CCAN_FAILTEST_RESTORE_H */ ntdb-1.0/lib/ccan/failtest/test/000077500000000000000000000000001224151530700165455ustar00rootroot00000000000000ntdb-1.0/lib/ccan/failtest/test/run-failpath.c000066400000000000000000000023771224151530700213140ustar00rootroot00000000000000#include #include #include #include #include #include int main(void) { int fds[2], fd; void *p; plan_tests(14); failtest_init(0, NULL); failpath = "mceopwrMCEOPWR"; ok1((p = failtest_malloc(10, "run-failpath.c", 1)) != NULL); ok1(failtest_calloc(10, 5, "run-failpath.c", 1) != NULL); ok1((p = failtest_realloc(p, 100, "run-failpath.c", 1)) != NULL); ok1((fd = failtest_open("failpath-scratch", "run-failpath.c", 1, O_RDWR|O_CREAT, 0600)) >= 0); ok1(failtest_pipe(fds, "run-failpath.c", 1) == 0); ok1(failtest_write(fd, "xxxx", 4, "run-failpath.c", 1) == 4); lseek(fd, 0, SEEK_SET); ok1(failtest_read(fd, p, 5, "run-failpath.c", 1) == 4); /* Now we're into the failures. */ ok1(failtest_malloc(10, "run-failpath.c", 1) == NULL); ok1(failtest_calloc(10, 5, "run-failpath.c", 1) == NULL); ok1(failtest_realloc(p, 100, "run-failpath.c", 1) == NULL); ok1(failtest_open("failpath-scratch", "run-failpath.c", 1, O_RDWR|O_CREAT, 0600) == -1); ok1(failtest_pipe(fds, "run-failpath.c", 1) == -1); ok1(failtest_write(fd, "xxxx", 4, "run-failpath.c", 1) == -1); lseek(fd, 0, SEEK_SET); ok1(failtest_read(fd, p, 5, "run-failpath.c", 1) == -1); return exit_status(); } ntdb-1.0/lib/ccan/failtest/test/run-history.c000066400000000000000000000130701224151530700212150ustar00rootroot00000000000000/* Include the C files directly. 
*/ #include #include #include #include #include int main(void) { struct failtest_call *call; struct calloc_call calloc_call; struct malloc_call malloc_call; struct realloc_call realloc_call; struct open_call open_call; struct pipe_call pipe_call; struct read_call read_call; struct write_call write_call; struct mmap_call mmap_call; char buf[20]; unsigned int i; char *path; /* This is how many tests you plan to run */ plan_tests(69); calloc_call.ret = calloc(1, 2); calloc_call.nmemb = 1; calloc_call.size = 2; call = add_history(FAILTEST_CALLOC, true, "run-history.c", 1, &calloc_call); /* Normally should_fail would set this. */ call->fail = false; ok1(call->type == FAILTEST_CALLOC); ok1(call->can_leak == true); ok1(strcmp(call->file, "run-history.c") == 0); ok1(call->line == 1); ok1(call->u.calloc.ret == calloc_call.ret); ok1(call->u.calloc.nmemb == calloc_call.nmemb); ok1(call->u.calloc.size == calloc_call.size); malloc_call.ret = malloc(2); malloc_call.size = 2; call = add_history(FAILTEST_MALLOC, true, "run-history.c", 2, &malloc_call); /* Normally should_fail would set this. */ call->fail = false; ok1(call->type == FAILTEST_MALLOC); ok1(call->can_leak == true); ok1(strcmp(call->file, "run-history.c") == 0); ok1(call->line == 2); ok1(call->u.malloc.ret == malloc_call.ret); ok1(call->u.malloc.size == malloc_call.size); realloc_call.ret = realloc(malloc_call.ret, 3); realloc_call.ptr = malloc_call.ret; realloc_call.size = 3; call = add_history(FAILTEST_REALLOC, true, "run-history.c", 3, &realloc_call); /* Normally should_fail would set this. 
*/ call->fail = false; ok1(call->type == FAILTEST_REALLOC); ok1(call->can_leak == true); ok1(strcmp(call->file, "run-history.c") == 0); ok1(call->line == 3); ok1(call->u.realloc.ret == realloc_call.ret); ok1(call->u.realloc.ptr == realloc_call.ptr); ok1(call->u.realloc.size == realloc_call.size); open_call.ret = open("test/run-history.c", O_RDONLY); open_call.pathname = "test/run-history.c"; open_call.flags = O_RDONLY; open_call.mode = 0; open_call.closed = false; call = add_history(FAILTEST_OPEN, true, "run-history.c", 4, &open_call); /* Normally should_fail would set this. */ call->fail = false; ok1(call->type == FAILTEST_OPEN); ok1(call->can_leak == true); ok1(strcmp(call->file, "run-history.c") == 0); ok1(call->line == 4); ok1(call->u.open.ret == open_call.ret); ok1(strcmp(call->u.open.pathname, open_call.pathname) == 0); ok1(call->u.open.flags == open_call.flags); ok1(call->u.open.mode == open_call.mode); pipe_call.ret = pipe(pipe_call.fds); call = add_history(FAILTEST_PIPE, true, "run-history.c", 5, &pipe_call); /* Normally should_fail would set this. */ call->fail = false; ok1(call->type == FAILTEST_PIPE); ok1(strcmp(call->file, "run-history.c") == 0); ok1(call->can_leak == true); ok1(call->line == 5); ok1(call->u.pipe.ret == pipe_call.ret); ok1(call->u.pipe.fds[0] == pipe_call.fds[0]); ok1(call->u.pipe.fds[1] == pipe_call.fds[1]); read_call.ret = read(open_call.ret, buf, 20); read_call.buf = buf; read_call.fd = open_call.ret; read_call.count = 20; call = add_history(FAILTEST_READ, false, "run-history.c", 6, &read_call); /* Normally should_fail would set this. 
*/ call->fail = false; ok1(call->type == FAILTEST_READ); ok1(call->can_leak == false); ok1(strcmp(call->file, "run-history.c") == 0); ok1(call->line == 6); ok1(call->u.read.ret == read_call.ret); ok1(call->u.read.buf == read_call.buf); ok1(call->u.read.fd == read_call.fd); ok1(call->u.read.count == read_call.count); write_call.ret = 20; write_call.buf = buf; write_call.fd = open_call.ret; write_call.count = 20; write_call.opener = NULL; call = add_history(FAILTEST_WRITE, false, "run-history.c", 7, &write_call); /* Normally should_fail would set this. */ call->fail = false; ok1(call->type == FAILTEST_WRITE); ok1(call->can_leak == false); ok1(strcmp(call->file, "run-history.c") == 0); ok1(call->line == 7); ok1(call->u.write.ret == write_call.ret); ok1(call->u.write.buf == write_call.buf); ok1(call->u.write.fd == write_call.fd); ok1(call->u.write.count == write_call.count); ok1(call->u.write.opener == write_call.opener); mmap_call.ret = &mmap_call; mmap_call.addr = NULL; mmap_call.length = 4096; mmap_call.prot = PROT_READ; mmap_call.flags = 0; mmap_call.fd = open_call.ret; mmap_call.offset = 0; mmap_call.opener = opener_of(open_call.ret); ok1(mmap_call.opener->type == FAILTEST_OPEN); mmap_call.saved = NULL; call = add_history(FAILTEST_MMAP, false, "run-history.c", 8, &mmap_call); /* Normally should_fail would set this. 
*/ call->fail = false; ok1(call->type == FAILTEST_MMAP); ok1(call->can_leak == false); ok1(strcmp(call->file, "run-history.c") == 0); ok1(call->line == 8); ok1(call->u.mmap.ret == mmap_call.ret); ok1(call->u.mmap.addr == mmap_call.addr); ok1(call->u.mmap.length == mmap_call.length); ok1(call->u.mmap.prot == mmap_call.prot); ok1(call->u.mmap.flags == mmap_call.flags); ok1(call->u.mmap.fd == mmap_call.fd); ok1(call->u.mmap.offset == mmap_call.offset); ok1(call->u.mmap.opener == mmap_call.opener); ok1(call->u.mmap.saved == mmap_call.saved); i = 0; tlist_for_each(&history, call, list) i++; ok1(i == 8); tlist_for_each(&history, call, list) call->fail = false; path = failpath_string(); ok1(streq(path, "cmeoprwa")); free(path); tlist_for_each(&history, call, list) call->fail = true; path = failpath_string(); ok1(streq(path, "CMEOPRWA")); free(path); return exit_status(); } ntdb-1.0/lib/ccan/failtest/test/run-locking.c000066400000000000000000000053101224151530700211400ustar00rootroot00000000000000/* Include the C files directly. */ #include #include #include #include #include #include #include #define SIZE 8 /* We don't want to fork and fail; we're just testing lock recording. */ static enum failtest_result dont_fail(struct tlist_calls *history) { return FAIL_DONT_FAIL; } static bool place_lock(int fd, char lockarr[], unsigned pos, unsigned size, int type) { struct flock fl; /* Update record keeping. 
*/ if (type == F_RDLCK) memset(lockarr+pos, 1, size); else if (type == F_WRLCK) memset(lockarr+pos, 2, size); else memset(lockarr+pos, 0, size); fl.l_whence = SEEK_SET; fl.l_type = type; fl.l_start = pos; fl.l_len = size; return failtest_fcntl(fd, "run-locking.c", 1, F_SETLK, &fl) == 0; } static char lock_lookup(int fd, unsigned pos) { char ret = 0; unsigned int i; struct lock_info *l; for (i = 0; i < lock_num; i++) { l = &locks[i]; if (l->fd != fd) continue; if (pos >= l->start && pos <= l->end) { if (ret) ret = 3; else if (l->type == F_RDLCK) ret = 1; else ret = 2; } } return ret; } static bool test(int fd, unsigned p1, unsigned s1, unsigned p2, unsigned s2, unsigned p3, unsigned s3) { unsigned int i; char lockarr[SIZE]; memset(lockarr, 0, sizeof(lockarr)); if (!place_lock(fd, lockarr, p1, s1, F_WRLCK)) return false; if (!place_lock(fd, lockarr, p2, s2, F_RDLCK)) return false; if (!place_lock(fd, lockarr, p3, s3, F_UNLCK)) return false; for (i = 0; i < SIZE; i++) { if (lock_lookup(fd, i) != lockarr[i]) return false; } /* Reset lock info. */ lock_num = 0; return true; } int main(void) { int fd; long flags; unsigned int isize; plan_tests(5835); failtest_init(0, NULL); failtest_hook = dont_fail; fd = open("run-locking-scratch", O_RDWR|O_CREAT, 0600); /* GETFL and SETFL wrappers should pass through. 
*/ flags = fcntl(fd, F_GETFL); ok1(failtest_fcntl(fd, "run-locking.c", 1, F_GETFL) == flags); flags |= O_NONBLOCK; ok1(failtest_fcntl(fd, "run-locking.c", 1, F_SETFL, flags) == 0); ok1(failtest_fcntl(fd, "run-locking.c", 1, F_GETFL) == flags); for (isize = 1; isize < 4; isize++) { unsigned int ipos; for (ipos = 0; ipos + isize < SIZE; ipos++) { unsigned int jsize; for (jsize = 1; jsize < 4; jsize++) { unsigned int jpos; for (jpos = 0; jpos + jsize < SIZE; jpos++) { unsigned int ksize; for (ksize = 1; ksize < 4; ksize++) { unsigned int kpos; for (kpos = 0; kpos + ksize < SIZE; kpos++) { ok1(test(fd, ipos, isize, jpos, jsize, kpos, ksize)); } } } } } } return exit_status(); } ntdb-1.0/lib/ccan/failtest/test/run-malloc.c000066400000000000000000000043501224151530700207640ustar00rootroot00000000000000#include "config.h" #include #include #include #include #include #include /* We don't actually want it to exit... */ static jmp_buf exited; #define exit(status) longjmp(exited, (status) + 1) #define printf saved_printf static int saved_printf(const char *fmt, ...); #define fprintf saved_fprintf static int saved_fprintf(FILE *ignored, const char *fmt, ...); #define vfprintf saved_vfprintf static int saved_vfprintf(FILE *ignored, const char *fmt, va_list ap); /* Hack to avoid a memory leak which valgrind complains about. */ #define realloc set_realloc static void *set_realloc(void *ptr, size_t size); #define free set_free static void set_free(void *ptr); /* Include the C files directly. 
*/ #include #undef realloc #undef free static char *buffer; static void *set_realloc(void *ptr, size_t size) { return buffer = realloc(ptr, size); } static void set_free(void *ptr) { if (ptr == buffer) buffer = NULL; free(ptr); } static char *output = NULL; static int saved_vprintf(const char *fmt, va_list ap) { int ret; int len = 0; va_list ap2; va_copy(ap2, ap); ret = vsnprintf(NULL, 0, fmt, ap2); va_end(ap2); if (output) len = strlen(output); output = realloc(output, len + ret + 1); return vsprintf(output + len, fmt, ap); } static int saved_vfprintf(FILE *ignored, const char *fmt, va_list ap) { return saved_vprintf(fmt, ap); } static int saved_printf(const char *fmt, ...) { va_list ap; int ret; va_start(ap, fmt); ret = saved_vprintf(fmt, ap); va_end(ap); return ret; } static int saved_fprintf(FILE *ignored, const char *fmt, ...) { va_list ap; int ret; va_start(ap, fmt); ret = saved_vprintf(fmt, ap); va_end(ap); return ret; } int main(void) { int status; plan_tests(3); failtest_init(0, NULL); status = setjmp(exited); if (status == 0) { char *p = failtest_malloc(1, "run-malloc.c", 1); /* If we just segv, valgrind counts that as a failure. * So kill ourselves creatively. */ if (!p) kill(getpid(), SIGSEGV); fail("Expected child to crash!"); } else { ok1(status == 2); ok1(strstr(output, "Killed by signal")); ok1(strstr(output, "--failpath=M\n")); } free(buffer); return exit_status(); } ntdb-1.0/lib/ccan/failtest/test/run-open.c000066400000000000000000000034411224151530700204560ustar00rootroot00000000000000/* Include the C files directly. */ #include #include #include #include #include #include #include int main(void) { int fd, pfd[2], err; char buf[] = "Hello world!"; struct stat st; plan_tests(12); failtest_init(0, NULL); if (pipe(pfd)) abort(); fd = failtest_open("run-open-scratchpad", "run-open.c", 1, O_RDWR|O_CREAT, 0600); if (fd == -1) { /* We are the child: write error code for parent to check. 
*/ err = errno; if (write(pfd[1], &err, sizeof(err)) != sizeof(err)) abort(); failtest_exit(0); } /* Check it is read-write. */ ok1(write(fd, buf, strlen(buf)) == strlen(buf)); lseek(fd, SEEK_SET, 0); ok1(read(fd, buf, strlen("Hello world!")) == strlen("Hello world!")); ok1(strcmp(buf, "Hello world!") == 0); /* Check name and perms. */ ok1(stat("run-open-scratchpad", &st) == 0); ok1(st.st_size == strlen(buf)); ok1(S_ISREG(st.st_mode)); ok1((st.st_mode & 0777) == 0600); /* Check child got correct errno. */ ok1(read(pfd[0], &err, sizeof(err)) == sizeof(err)); ok1(err == EACCES); /* Clean up. */ failtest_close(fd, "run-open.c", 1); close(pfd[0]); close(pfd[1]); /* Two-arg open. */ if (pipe(pfd) != 0) abort(); fd = failtest_open("run-open-scratchpad", "run-open.c", 1, O_RDONLY); if (fd == -1) { /* We are the child: write error code for parent to check. */ err = errno; if (write(pfd[1], &err, sizeof(err)) != sizeof(err)) abort(); failtest_exit(0); } /* Check it is read-only. */ ok1(write(fd, buf, strlen(buf)) == -1); ok1(read(fd, buf, strlen("Hello world!")) == strlen("Hello world!")); ok1(strcmp(buf, "Hello world!") == 0); /* Clean up. */ failtest_close(fd, "run-open.c", 1); close(pfd[0]); close(pfd[1]); return exit_status(); } ntdb-1.0/lib/ccan/failtest/test/run-with-fdlimit.c000066400000000000000000000023351224151530700221170ustar00rootroot00000000000000/* Include the C files directly. 
*/ #include #include #include #include int main(void) { int fd, pfd[2], ecode; struct rlimit lim; if (getrlimit(RLIMIT_NOFILE, &lim) != 0) err(1, "getrlimit RLIMIT_NOFILE fail?"); printf("rlimit = %lu/%lu (inf=%lu)\n", (long)lim.rlim_cur, (long)lim.rlim_max, (long)RLIM_INFINITY); lim.rlim_cur /= 2; if (lim.rlim_cur < 8) errx(1, "getrlimit limit %li too low", (long)lim.rlim_cur); if (setrlimit(RLIMIT_NOFILE, &lim) != 0) err(1, "setrlimit RLIMIT_NOFILE (%li/%li)", (long)lim.rlim_cur, (long)lim.rlim_max); plan_tests(2); failtest_init(0, NULL); if (pipe(pfd)) abort(); fd = failtest_open("run-with-fdlimit-scratch", "run-with_fdlimit.c", 1, O_RDWR|O_CREAT, 0600); if (fd == -1) { /* We are the child: write error code for parent to check. */ ecode = errno; if (write(pfd[1], &ecode, sizeof(ecode)) != sizeof(ecode)) abort(); failtest_exit(0); } /* Check child got correct errno. */ ok1(read(pfd[0], &ecode, sizeof(ecode)) == sizeof(ecode)); ok1(ecode == EACCES); /* Clean up. */ failtest_close(fd, "run-open.c", 1); close(pfd[0]); close(pfd[1]); return exit_status(); } ntdb-1.0/lib/ccan/failtest/test/run-write.c000066400000000000000000000023511224151530700206460ustar00rootroot00000000000000/* Include the C files directly. */ #include #include #include #include #include #include #include int main(int argc, char *argv[]) { int fd; char *p; char buf[] = "Hello world!"; plan_tests(5); failtest_init(argc, argv); fd = failtest_open("run-write-scratchpad", __FILE__, __LINE__, O_RDWR|O_CREAT, 0600); /* Child will fail, ignore. */ if (fd < 0) failtest_exit(0); if (write(fd, buf, strlen(buf)) != strlen(buf)) abort(); ok1(lseek(fd, 0, SEEK_CUR) == strlen(buf)); p = failtest_malloc(100, __FILE__, __LINE__); if (!p) { /* We are the child. Do a heap of writes. */ unsigned int i; for (i = 0; i < strlen(buf)+1; i++) if (failtest_write(fd, "x", 1, __FILE__, __LINE__) == 1) break; failtest_close(fd, __FILE__, __LINE__); failtest_exit(0); } /* Seek pointer should be left alone! 
*/ ok1(lseek(fd, 0, SEEK_CUR) == strlen(buf)); /* Length should be restored. */ ok1(lseek(fd, 0, SEEK_END) == strlen(buf)); lseek(fd, 0, SEEK_SET); ok1(read(fd, buf, strlen(buf)) == strlen("Hello world!")); ok1(strcmp(buf, "Hello world!") == 0); failtest_close(fd, __FILE__, __LINE__); return exit_status(); } ntdb-1.0/lib/ccan/hash/000077500000000000000000000000001224151530700146765ustar00rootroot00000000000000ntdb-1.0/lib/ccan/hash/_info000066400000000000000000000015211224151530700157120ustar00rootroot00000000000000#include #include /** * hash - routines for hashing bytes * * When creating a hash table it's important to have a hash function * which mixes well and is fast. This package supplies such functions. * * The hash functions come in two flavors: the normal ones and the * stable ones. The normal ones can vary from machine-to-machine and * may change if we find better or faster hash algorithms in future. * The stable ones will always give the same results on any computer, * and on any version of this package. * * License: Public Domain * Maintainer: Rusty Russell * Author: Bob Jenkins */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/build_assert\n"); return 0; } return 1; } ntdb-1.0/lib/ccan/hash/hash.c000066400000000000000000000730161224151530700157740ustar00rootroot00000000000000/* ------------------------------------------------------------------------------- lookup3.c, by Bob Jenkins, May 2006, Public Domain. These are functions for producing 32-bit hashes for hash table lookup. hash_word(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() are externally useful functions. Routines to test the hash are included if SELF_TEST is defined. You can use this free for any purpose. It's in the public domain. It has no warranty. You probably want to use hashlittle(). hashlittle() and hashbig() hash byte arrays. hashlittle() is is faster than hashbig() on little-endian machines. 
Intel and AMD are little-endian machines. On second thought, you probably want hashlittle2(), which is identical to hashlittle() except it returns two 32-bit hashes for the price of one. You could implement hashbig2() if you wanted but I haven't bothered here. If you want to find a hash of, say, exactly 7 integers, do a = i1; b = i2; c = i3; mix(a,b,c); a += i4; b += i5; c += i6; mix(a,b,c); a += i7; final(a,b,c); then use c as the hash value. If you have a variable length array of 4-byte integers to hash, use hash_word(). If you have a byte array (like a character string), use hashlittle(). If you have several byte arrays, or a mix of things, see the comments above hashlittle(). Why is this so big? I read 12 bytes at a time into 3 4-byte integers, then mix those integers. This is fast (you can do a lot more thorough mixing with 12*3 instructions on 3 integers than you can with 3 instructions on 1 byte), but shoehorning those bytes into integers efficiently is messy. ------------------------------------------------------------------------------- */ //#define SELF_TEST 1 #if 0 #include /* defines printf for tests */ #include /* defines time_t for timings in the test */ #include /* defines uint32_t etc */ #include /* attempt to define endianness */ #ifdef linux # include /* attempt to define endianness */ #endif /* * My best guess at if you are big-endian or little-endian. This may * need adjustment. 
*/ #if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \ __BYTE_ORDER == __LITTLE_ENDIAN) || \ (defined(i386) || defined(__i386__) || defined(__i486__) || \ defined(__i586__) || defined(__i686__) || defined(__x86_64) || \ defined(vax) || defined(MIPSEL)) # define HASH_LITTLE_ENDIAN 1 # define HASH_BIG_ENDIAN 0 #elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \ __BYTE_ORDER == __BIG_ENDIAN) || \ (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel)) # define HASH_LITTLE_ENDIAN 0 # define HASH_BIG_ENDIAN 1 #else # error Unknown endian #endif #endif /* old hash.c headers. */ #include "hash.h" #if HAVE_LITTLE_ENDIAN #define HASH_LITTLE_ENDIAN 1 #define HASH_BIG_ENDIAN 0 #elif HAVE_BIG_ENDIAN #define HASH_LITTLE_ENDIAN 0 #define HASH_BIG_ENDIAN 1 #else #error Unknown endian #endif #define hashsize(n) ((uint32_t)1<<(n)) #define hashmask(n) (hashsize(n)-1) #define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k)))) /* ------------------------------------------------------------------------------- mix -- mix 3 32-bit values reversibly. This is reversible, so any information in (a,b,c) before mix() is still in (a,b,c) after mix(). If four pairs of (a,b,c) inputs are run through mix(), or through mix() in reverse, there are at least 32 bits of the output that are sometimes the same for one pair and different for another pair. This was tested for: * pairs that differed by one bit, by two bits, in any combination of top bits of (a,b,c), or in any combination of bottom bits of (a,b,c). * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed the output delta to a Gray code (a^(a>>1)) so a string of 1's (as is commonly produced by subtraction) look like a single 1-bit difference. * the base values were pseudorandom, all zero but one bit set, or all zero plus a counter that starts at zero. 
Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that satisfy this are 4 6 8 16 19 4 9 15 3 18 27 15 14 9 3 7 17 3 Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for "differ" defined as + with a one-bit base and a two-bit delta. I used http://burtleburtle.net/bob/hash/avalanche.html to choose the operations, constants, and arrangements of the variables. This does not achieve avalanche. There are input bits of (a,b,c) that fail to affect some output bits of (a,b,c), especially of a. The most thoroughly mixed value is c, but it doesn't really even achieve avalanche in c. This allows some parallelism. Read-after-writes are good at doubling the number of bits affected, so the goal of mixing pulls in the opposite direction as the goal of parallelism. I did what I could. Rotates seem to cost as much as shifts on every machine I could lay my hands on, and rotates are much kinder to the top and bottom bits, so I used rotates. ------------------------------------------------------------------------------- */ #define mix(a,b,c) \ { \ a -= c; a ^= rot(c, 4); c += b; \ b -= a; b ^= rot(a, 6); a += c; \ c -= b; c ^= rot(b, 8); b += a; \ a -= c; a ^= rot(c,16); c += b; \ b -= a; b ^= rot(a,19); a += c; \ c -= b; c ^= rot(b, 4); b += a; \ } /* ------------------------------------------------------------------------------- final -- final mixing of 3 32-bit values (a,b,c) into c Pairs of (a,b,c) values differing in only a few bits will usually produce values of c that look totally different. This was tested for * pairs that differed by one bit, by two bits, in any combination of top bits of (a,b,c), or in any combination of bottom bits of (a,b,c). * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed the output delta to a Gray code (a^(a>>1)) so a string of 1's (as is commonly produced by subtraction) look like a single 1-bit difference. * the base values were pseudorandom, all zero but one bit set, or all zero plus a counter that starts at zero. 
These constants passed:
 14 11 25 16 4 14 24
 12 14 25 16 4 14 24
and these came close:
  4  8 15 26 3 22 24
 10  8 15 26 3 22 24
 11  8 15 26 3 22 24
-------------------------------------------------------------------------------
*/
/* final -- irreversibly fold (a,b,c) down so that c is the best-mixed
 * word (the primary hash) and b the second-best (the secondary hash). */
#define final(a,b,c) \
{ \
  c ^= b; c -= rot(b,14); \
  a ^= c; a -= rot(c,11); \
  b ^= a; b -= rot(a,25); \
  c ^= b; c -= rot(b,16); \
  a ^= c; a -= rot(c,4);  \
  b ^= a; b -= rot(a,14); \
  c ^= b; c -= rot(b,24); \
}

/*
--------------------------------------------------------------------
This works on all machines.  To be useful, it requires
-- that the key be an array of uint32_t's, and
-- that the length be the number of uint32_t's in the key

The function hash_word() is identical to hashlittle() on little-endian
machines, and identical to hashbig() on big-endian machines,
except that the length has to be measured in uint32_ts rather than in
bytes.  hashlittle() is more complicated than hash_word() only because
hashlittle() has to dance around fitting the key bytes into registers.
--------------------------------------------------------------------
*/
/* hash_u32 -- hash an array of uint32_t's into a 32-bit value.
 * Seeds three state words from the length and @initval, mixes the key
 * three words at a time, then folds any 1-3 trailing words through
 * final().  A zero-length key returns the seed unchanged. */
uint32_t hash_u32(
	const uint32_t *k,	/* the key, an array of uint32_t values */
	size_t length,		/* the length of the key, in uint32_ts */
	uint32_t initval)	/* the previous hash, or an arbitrary value */
{
	uint32_t a, b, c;

	/* Seed all three state words identically. */
	a = b = c = 0xdeadbeef + (((uint32_t)length)<<2) + initval;

	/* Consume the bulk of the key, three words per round. */
	for (; length > 3; length -= 3, k += 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		mix(a,b,c);
	}

	/* Fold in the last 0-3 words; cases fall through deliberately. */
	switch (length) {
	case 3:
		c += k[2];
	case 2:
		b += k[1];
	case 1:
		a += k[0];
		final(a,b,c);
	case 0:		/* nothing left to add */
		break;
	}

	/* c is the best-mixed state word; it is the result. */
	return c;
}

/*
-------------------------------------------------------------------------------
hashlittle() -- hash a variable-length key into a 32-bit value
  k       : the key (the unaligned variable-length array of bytes)
  length  : the length of the key, counting by bytes
  val2    : IN: can be any 4-byte value OUT: second 32 bit hash.
Returns a 32-bit value.  Every bit of the key affects every bit of
the return value.  Two keys differing by one or two bits will have
totally different hash values.  Note that the return value is better
mixed than val2, so use that first.

The best hash table sizes are powers of 2.  There is no need to do
mod a prime (mod is sooo slow!).  If you need less than 32 bits,
use a bitmask.  For example, if you need only 10 bits, do
  h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.
If you are hashing n strings (uint8_t **)k, do it like this: for (i=0, h=0; i 12) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); length -= 12; k += 3; } /*----------------------------- handle the last (probably partial) block */ /* * "k[2]&0xffffff" actually reads beyond the end of the string, but * then masks off the part it's not allowed to read. Because the * string is aligned, the masked-off tail is in the same word as the * rest of the string. Every machine with memory protection I've seen * does it on word boundaries, so is OK with this. But VALGRIND will * still catch it and complain. The masking trick does make the hash * noticably faster for short strings (like English words). * * Not on my testing with gcc 4.5 on an intel i5 CPU, at least --RR. */ #if 0 switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=k[1]&0xffffff; a+=k[0]; break; case 6 : b+=k[1]&0xffff; a+=k[0]; break; case 5 : b+=k[1]&0xff; a+=k[0]; break; case 4 : a+=k[0]; break; case 3 : a+=k[0]&0xffffff; break; case 2 : a+=k[0]&0xffff; break; case 1 : a+=k[0]&0xff; break; case 0 : return c; /* zero length strings require no mixing */ } #else /* make valgrind happy */ k8 = (const uint8_t *)k; switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ case 9 : c+=k8[8]; /* fall through */ case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ case 5 : b+=k8[4]; /* fall through */ case 4 : a+=k[0]; break; case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ case 1 : a+=k8[0]; break; case 0 : return c; } #endif /* !valgrind */ } else if 
(HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */ const uint8_t *k8; /*--------------- all but last block: aligned reads and different mixing */ while (length > 12) { a += k[0] + (((uint32_t)k[1])<<16); b += k[2] + (((uint32_t)k[3])<<16); c += k[4] + (((uint32_t)k[5])<<16); mix(a,b,c); length -= 12; k += 6; } /*----------------------------- handle the last (probably partial) block */ k8 = (const uint8_t *)k; switch(length) { case 12: c+=k[4]+(((uint32_t)k[5])<<16); b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ case 10: c+=k[4]; b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; case 9 : c+=k8[8]; /* fall through */ case 8 : b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ case 6 : b+=k[2]; a+=k[0]+(((uint32_t)k[1])<<16); break; case 5 : b+=k8[4]; /* fall through */ case 4 : a+=k[0]+(((uint32_t)k[1])<<16); break; case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ case 2 : a+=k[0]; break; case 1 : a+=k8[0]; break; case 0 : return c; /* zero length requires no mixing */ } } else { /* need to read the key one byte at a time */ const uint8_t *k = (const uint8_t *)key; /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ while (length > 12) { a += k[0]; a += ((uint32_t)k[1])<<8; a += ((uint32_t)k[2])<<16; a += ((uint32_t)k[3])<<24; b += k[4]; b += ((uint32_t)k[5])<<8; b += ((uint32_t)k[6])<<16; b += ((uint32_t)k[7])<<24; c += k[8]; c += ((uint32_t)k[9])<<8; c += ((uint32_t)k[10])<<16; c += ((uint32_t)k[11])<<24; mix(a,b,c); length -= 12; k += 12; } /*-------------------------------- last block: affect all 32 bits of (c) */ switch(length) /* all the case statements fall through */ { case 12: c+=((uint32_t)k[11])<<24; case 11: c+=((uint32_t)k[10])<<16; case 10: c+=((uint32_t)k[9])<<8; case 9 : c+=k[8]; case 8 : 
b+=((uint32_t)k[7])<<24; case 7 : b+=((uint32_t)k[6])<<16; case 6 : b+=((uint32_t)k[5])<<8; case 5 : b+=k[4]; case 4 : a+=((uint32_t)k[3])<<24; case 3 : a+=((uint32_t)k[2])<<16; case 2 : a+=((uint32_t)k[1])<<8; case 1 : a+=k[0]; break; case 0 : return c; } } final(a,b,c); *val2 = b; return c; } /* * hashbig(): * This is the same as hash_word() on big-endian machines. It is different * from hashlittle() on all machines. hashbig() takes advantage of * big-endian byte ordering. */ static uint32_t hashbig( const void *key, size_t length, uint32_t *val2) { uint32_t a,b,c; union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */ /* Set up the internal state */ a = b = c = 0xdeadbeef + ((uint32_t)length) + *val2; u.ptr = key; if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) { const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */ const uint8_t *k8; /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ while (length > 12) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); length -= 12; k += 3; } /*----------------------------- handle the last (probably partial) block */ /* * "k[2]<<8" actually reads beyond the end of the string, but * then shifts out the part it's not allowed to read. Because the * string is aligned, the illegal read is in the same word as the * rest of the string. Every machine with memory protection I've seen * does it on word boundaries, so is OK with this. But VALGRIND will * still catch it and complain. The masking trick does make the hash * noticably faster for short strings (like English words). * * Not on my testing with gcc 4.5 on an intel i5 CPU, at least --RR. 
*/ #if 0 switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break; case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break; case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break; case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=k[1]&0xffffff00; a+=k[0]; break; case 6 : b+=k[1]&0xffff0000; a+=k[0]; break; case 5 : b+=k[1]&0xff000000; a+=k[0]; break; case 4 : a+=k[0]; break; case 3 : a+=k[0]&0xffffff00; break; case 2 : a+=k[0]&0xffff0000; break; case 1 : a+=k[0]&0xff000000; break; case 0 : return c; /* zero length strings require no mixing */ } #else /* make valgrind happy */ k8 = (const uint8_t *)k; switch(length) /* all the case statements fall through */ { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=((uint32_t)k8[10])<<8; /* fall through */ case 10: c+=((uint32_t)k8[9])<<16; /* fall through */ case 9 : c+=((uint32_t)k8[8])<<24; /* fall through */ case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=((uint32_t)k8[6])<<8; /* fall through */ case 6 : b+=((uint32_t)k8[5])<<16; /* fall through */ case 5 : b+=((uint32_t)k8[4])<<24; /* fall through */ case 4 : a+=k[0]; break; case 3 : a+=((uint32_t)k8[2])<<8; /* fall through */ case 2 : a+=((uint32_t)k8[1])<<16; /* fall through */ case 1 : a+=((uint32_t)k8[0])<<24; break; case 0 : return c; } #endif /* !VALGRIND */ } else { /* need to read the key one byte at a time */ const uint8_t *k = (const uint8_t *)key; /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ while (length > 12) { a += ((uint32_t)k[0])<<24; a += ((uint32_t)k[1])<<16; a += ((uint32_t)k[2])<<8; a += ((uint32_t)k[3]); b += ((uint32_t)k[4])<<24; b += ((uint32_t)k[5])<<16; b += ((uint32_t)k[6])<<8; b += ((uint32_t)k[7]); c += ((uint32_t)k[8])<<24; c += ((uint32_t)k[9])<<16; c += ((uint32_t)k[10])<<8; c += ((uint32_t)k[11]); mix(a,b,c); length -= 12; k += 12; } /*-------------------------------- last block: affect all 32 bits of (c) */ switch(length) /* all the case statements fall 
	   through */
	{
	case 12: c+=k[11];
	case 11: c+=((uint32_t)k[10])<<8;
	case 10: c+=((uint32_t)k[9])<<16;
	case 9 : c+=((uint32_t)k[8])<<24;
	case 8 : b+=k[7];
	case 7 : b+=((uint32_t)k[6])<<8;
	case 6 : b+=((uint32_t)k[5])<<16;
	case 5 : b+=((uint32_t)k[4])<<24;
	case 4 : a+=k[3];
	case 3 : a+=((uint32_t)k[2])<<8;
	case 2 : a+=((uint32_t)k[1])<<16;
	case 1 : a+=((uint32_t)k[0])<<24;
		break;
	case 0 : return c;
	}
	}

	final(a,b,c);
	*val2 = b;	/* secondary hash out-parameter */
	return c;
}

/* I basically use hashlittle here, but use native endian within each
 * element.  This delivers least-surprise: hash such as "int arr[] = {
 * 1, 2 }; hash_stable(arr, 2, 0);" will be the same on big and little
 * endian machines, even though a bytewise hash wouldn't be. */
/* Stable hash over an array of n uint64_t's: each element is split
 * into two native 32-bit halves before mixing, so the result does not
 * depend on in-memory byte order.  @base is folded into the seed. */
uint64_t hash64_stable_64(const void *key, size_t n, uint64_t base)
{
	const uint64_t *k = key;
	uint32_t a,b,c;

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t)n*8) + (base >> 32) + base;

	/* Two mix() rounds consume three 64-bit elements (six halves). */
	while (n > 3) {
		a += (uint32_t)k[0];
		b += (uint32_t)(k[0] >> 32);
		c += (uint32_t)k[1];
		mix(a,b,c);
		a += (uint32_t)(k[1] >> 32);
		b += (uint32_t)k[2];
		c += (uint32_t)(k[2] >> 32);
		mix(a,b,c);
		n -= 3;
		k += 3;
	}
	switch (n) {
	case 2:
		a += (uint32_t)k[0];
		b += (uint32_t)(k[0] >> 32);
		c += (uint32_t)k[1];
		mix(a,b,c);
		a += (uint32_t)(k[1] >> 32);
		break;
	case 1:
		a += (uint32_t)k[0];
		b += (uint32_t)(k[0] >> 32);
		break;
	case 0:
		return c;	/* empty key: seed is the result */
	}
	final(a,b,c);
	return ((uint64_t)b << 32) | c;
}

/* Stable hash over an array of n uint32_t's (whole native words). */
uint64_t hash64_stable_32(const void *key, size_t n, uint64_t base)
{
	const uint32_t *k = key;
	uint32_t a,b,c;

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t)n*4) + (base >> 32) + base;

	while (n > 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		mix(a,b,c);
		n -= 3;
		k += 3;
	}
	switch (n) {
	case 2:
		b += (uint32_t)k[1];	/* fall through */
	case 1:
		a += (uint32_t)k[0];
		break;
	case 0:
		return c;
	}
	final(a,b,c);
	return ((uint64_t)b << 32) | c;
}

/* Stable hash over an array of n uint16_t's: halfword pairs are packed
 * into 32-bit lanes in native order. */
uint64_t hash64_stable_16(const void *key, size_t n, uint64_t base)
{
	const uint16_t *k = key;
	uint32_t a,b,c;

	/* Set up the internal state */
	a = b = c = 0xdeadbeef +
		((uint32_t)n*2) + (base >> 32) + base;

	/* Six halfwords (three 32-bit lanes) per mix() round. */
	while (n > 6) {
		a += (uint32_t)k[0] + ((uint32_t)k[1] << 16);
		b += (uint32_t)k[2] + ((uint32_t)k[3] << 16);
		c += (uint32_t)k[4] + ((uint32_t)k[5] << 16);
		mix(a,b,c);
		n -= 6;
		k += 6;
	}

	/* Fold in the remaining 0-5 halfwords; cases fall through. */
	switch (n) {
	case 5: c += (uint32_t)k[4];
	case 4: b += ((uint32_t)k[3] << 16);
	case 3: b += (uint32_t)k[2];
	case 2: a += ((uint32_t)k[1] << 16);
	case 1: a += (uint32_t)k[0];
		break;
	case 0:
		return c;
	}
	final(a,b,c);
	return ((uint64_t)b << 32) | c;
}

/* Byte arrays have no endianness issue, so the stable byte hash is just
 * hashlittle() with the two 32-bit results recombined into 64 bits. */
uint64_t hash64_stable_8(const void *key, size_t n, uint64_t base)
{
	uint32_t b32 = base + (base >> 32);
	uint32_t lower = hashlittle(key, n, &b32);

	return ((uint64_t)b32 << 32) | lower;
}

/* Internal (machine-dependent) hash: dispatch on this machine's
 * endianness to the matching bytewise variant. */
uint32_t hash_any(const void *key, size_t length, uint32_t base)
{
	if (HASH_BIG_ENDIAN)
		return hashbig(key, length, &base);
	else
		return hashlittle(key, length, &base);
}

/* 32-bit stable variants: each returns its 64-bit counterpart's result
 * truncated to the low 32 bits by the implicit uint64_t -> uint32_t
 * conversion at the return statement. */
uint32_t hash_stable_64(const void *key, size_t n, uint32_t base)
{
	return hash64_stable_64(key, n, base);
}

uint32_t hash_stable_32(const void *key, size_t n, uint32_t base)
{
	return hash64_stable_32(key, n, base);
}

uint32_t hash_stable_16(const void *key, size_t n, uint32_t base)
{
	return hash64_stable_16(key, n, base);
}

uint32_t hash_stable_8(const void *key, size_t n, uint32_t base)
{
	return hashlittle(key, n, &base);
}

/* Jenkins' lookup8 is a 64 bit hash, but he says it's obsolete.  Use
 * the plain one and recombine into 64 bits.
*/ uint64_t hash64_any(const void *key, size_t length, uint64_t base) { uint32_t b32 = base + (base >> 32); uint32_t lower; if (HASH_BIG_ENDIAN) lower = hashbig(key, length, &b32); else lower = hashlittle(key, length, &b32); return ((uint64_t)b32 << 32) | lower; } #ifdef SELF_TEST /* used for timings */ void driver1() { uint8_t buf[256]; uint32_t i; uint32_t h=0; time_t a,z; time(&a); for (i=0; i<256; ++i) buf[i] = 'x'; for (i=0; i<1; ++i) { h = hashlittle(&buf[0],1,h); } time(&z); if (z-a > 0) printf("time %d %.8x\n", z-a, h); } /* check that every input bit changes every output bit half the time */ #define HASHSTATE 1 #define HASHLEN 1 #define MAXPAIR 60 #define MAXLEN 70 void driver2() { uint8_t qa[MAXLEN+1], qb[MAXLEN+2], *a = &qa[0], *b = &qb[1]; uint32_t c[HASHSTATE], d[HASHSTATE], i=0, j=0, k, l, m=0, z; uint32_t e[HASHSTATE],f[HASHSTATE],g[HASHSTATE],h[HASHSTATE]; uint32_t x[HASHSTATE],y[HASHSTATE]; uint32_t hlen; printf("No more than %d trials should ever be needed \n",MAXPAIR/2); for (hlen=0; hlen < MAXLEN; ++hlen) { z=0; for (i=0; i>(8-j)); c[0] = hashlittle(a, hlen, m); b[i] ^= ((k+1)<>(8-j)); d[0] = hashlittle(b, hlen, m); /* check every bit is 1, 0, set, and not set at least once */ for (l=0; lz) z=k; if (k==MAXPAIR) { printf("Some bit didn't change: "); printf("%.8x %.8x %.8x %.8x %.8x %.8x ", e[0],f[0],g[0],h[0],x[0],y[0]); printf("i %d j %d m %d len %d\n", i, j, m, hlen); } if (z==MAXPAIR) goto done; } } } done: if (z < MAXPAIR) { printf("Mix success %2d bytes %2d initvals ",i,m); printf("required %d trials\n", z/2); } } printf("\n"); } /* Check for reading beyond the end of the buffer and alignment problems */ void driver3() { uint8_t buf[MAXLEN+20], *b; uint32_t len; uint8_t q[] = "This is the time for all good men to come to the aid of their country..."; uint32_t h; uint8_t qq[] = "xThis is the time for all good men to come to the aid of their country..."; uint32_t i; uint8_t qqq[] = "xxThis is the time for all good men to come to the aid of 
their country..."; uint32_t j; uint8_t qqqq[] = "xxxThis is the time for all good men to come to the aid of their country..."; uint32_t ref,x,y; uint8_t *p; printf("Endianness. These lines should all be the same (for values filled in):\n"); printf("%.8x %.8x %.8x\n", hash_word((const uint32_t *)q, (sizeof(q)-1)/4, 13), hash_word((const uint32_t *)q, (sizeof(q)-5)/4, 13), hash_word((const uint32_t *)q, (sizeof(q)-9)/4, 13)); p = q; printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13), hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13), hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13), hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13), hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13), hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13)); p = &qq[1]; printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13), hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13), hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13), hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13), hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13), hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13)); p = &qqq[2]; printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13), hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13), hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13), hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13), hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13), hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13)); p = &qqqq[3]; printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13), 
hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13), hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13), hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13), hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13), hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13)); printf("\n"); /* check that hashlittle2 and hashlittle produce the same results */ i=47; j=0; hashlittle2(q, sizeof(q), &i, &j); if (hashlittle(q, sizeof(q), 47) != i) printf("hashlittle2 and hashlittle mismatch\n"); /* check that hash_word2 and hash_word produce the same results */ len = 0xdeadbeef; i=47, j=0; hash_word2(&len, 1, &i, &j); if (hash_word(&len, 1, 47) != i) printf("hash_word2 and hash_word mismatch %x %x\n", i, hash_word(&len, 1, 47)); /* check hashlittle doesn't read before or after the ends of the string */ for (h=0, b=buf+1; h<8; ++h, ++b) { for (i=0; i #include #include /* Stolen mostly from: lookup3.c, by Bob Jenkins, May 2006, Public Domain. * * http://burtleburtle.net/bob/c/lookup3.c */ /** * hash - fast hash of an array for internal use * @p: the array or pointer to first element * @num: the number of elements to hash * @base: the base number to roll into the hash (usually 0) * * The memory region pointed to by p is combined with the base to form * a 32-bit hash. * * This hash will have different results on different machines, so is * only useful for internal hashes (ie. not hashes sent across the * network or saved to disk). * * It may also change with future versions: it could even detect at runtime * what the fastest hash to use is. * * See also: hash64, hash_stable. * * Example: * #include * #include * #include * #include * * // Simple demonstration: idential strings will have the same hash, but * // two different strings will probably not. 
* int main(int argc, char *argv[]) * { * uint32_t hash1, hash2; * * if (argc != 3) * err(1, "Usage: %s ", argv[0]); * * hash1 = hash(argv[1], strlen(argv[1]), 0); * hash2 = hash(argv[2], strlen(argv[2]), 0); * printf("Hash is %s\n", hash1 == hash2 ? "same" : "different"); * return 0; * } */ #define hash(p, num, base) hash_any((p), (num)*sizeof(*(p)), (base)) /** * hash_stable - hash of an array for external use * @p: the array or pointer to first element * @num: the number of elements to hash * @base: the base number to roll into the hash (usually 0) * * The array of simple integer types pointed to by p is combined with * the base to form a 32-bit hash. * * This hash will have the same results on different machines, so can * be used for external hashes (ie. hashes sent across the network or * saved to disk). The results will not change in future versions of * this module. * * Note that it is only legal to hand an array of simple integer types * to this hash (ie. char, uint16_t, int64_t, etc). In these cases, * the same values will have the same hash result, even though the * memory representations of integers depend on the machine * endianness. * * See also: * hash64_stable * * Example: * #include * #include * #include * #include * * int main(int argc, char *argv[]) * { * if (argc != 2) * err(1, "Usage: %s ", argv[0]); * * printf("Hash stable result is %u\n", * hash_stable(argv[1], strlen(argv[1]), 0)); * return 0; * } */ #define hash_stable(p, num, base) \ (BUILD_ASSERT_OR_ZERO(sizeof(*(p)) == 8 || sizeof(*(p)) == 4 \ || sizeof(*(p)) == 2 || sizeof(*(p)) == 1) + \ sizeof(*(p)) == 8 ? hash_stable_64((p), (num), (base)) \ : sizeof(*(p)) == 4 ? hash_stable_32((p), (num), (base)) \ : sizeof(*(p)) == 2 ? 
hash_stable_16((p), (num), (base)) \ : hash_stable_8((p), (num), (base))) /** * hash_u32 - fast hash an array of 32-bit values for internal use * @key: the array of uint32_t * @num: the number of elements to hash * @base: the base number to roll into the hash (usually 0) * * The array of uint32_t pointed to by @key is combined with the base * to form a 32-bit hash. This is 2-3 times faster than hash() on small * arrays, but the advantage vanishes over large hashes. * * This hash will have different results on different machines, so is * only useful for internal hashes (ie. not hashes sent across the * network or saved to disk). */ uint32_t hash_u32(const uint32_t *key, size_t num, uint32_t base); /** * hash_string - very fast hash of an ascii string * @str: the nul-terminated string * * The string is hashed, using a hash function optimized for ASCII and * similar strings. It's weaker than the other hash functions. * * This hash may have different results on different machines, so is * only useful for internal hashes (ie. not hashes sent across the * network or saved to disk). The results will be different from the * other hash functions in this module, too. */ static inline uint32_t hash_string(const char *string) { /* This is Karl Nelson 's X31 hash. * It's a little faster than the (much better) lookup3 hash(): 56ns vs * 84ns on my 2GHz Intel Core Duo 2 laptop for a 10 char string. */ uint32_t ret; for (ret = 0; *string; string++) ret = (ret << 5) - ret + *string; return ret; } /** * hash64 - fast 64-bit hash of an array for internal use * @p: the array or pointer to first element * @num: the number of elements to hash * @base: the 64-bit base number to roll into the hash (usually 0) * * The memory region pointed to by p is combined with the base to form * a 64-bit hash. * * This hash will have different results on different machines, so is * only useful for internal hashes (ie. not hashes sent across the * network or saved to disk). 
* * It may also change with future versions: it could even detect at runtime * what the fastest hash to use is. * * See also: hash. * * Example: * #include * #include * #include * #include * * // Simple demonstration: idential strings will have the same hash, but * // two different strings will probably not. * int main(int argc, char *argv[]) * { * uint64_t hash1, hash2; * * if (argc != 3) * err(1, "Usage: %s ", argv[0]); * * hash1 = hash64(argv[1], strlen(argv[1]), 0); * hash2 = hash64(argv[2], strlen(argv[2]), 0); * printf("Hash is %s\n", hash1 == hash2 ? "same" : "different"); * return 0; * } */ #define hash64(p, num, base) hash64_any((p), (num)*sizeof(*(p)), (base)) /** * hash64_stable - 64 bit hash of an array for external use * @p: the array or pointer to first element * @num: the number of elements to hash * @base: the base number to roll into the hash (usually 0) * * The array of simple integer types pointed to by p is combined with * the base to form a 64-bit hash. * * This hash will have the same results on different machines, so can * be used for external hashes (ie. hashes sent across the network or * saved to disk). The results will not change in future versions of * this module. * * Note that it is only legal to hand an array of simple integer types * to this hash (ie. char, uint16_t, int64_t, etc). In these cases, * the same values will have the same hash result, even though the * memory representations of integers depend on the machine * endianness. * * See also: * hash_stable * * Example: * #include * #include * #include * #include * * int main(int argc, char *argv[]) * { * if (argc != 2) * err(1, "Usage: %s ", argv[0]); * * printf("Hash stable result is %llu\n", * (long long)hash64_stable(argv[1], strlen(argv[1]), 0)); * return 0; * } */ #define hash64_stable(p, num, base) \ (BUILD_ASSERT_OR_ZERO(sizeof(*(p)) == 8 || sizeof(*(p)) == 4 \ || sizeof(*(p)) == 2 || sizeof(*(p)) == 1) + \ sizeof(*(p)) == 8 ? 
hash64_stable_64((p), (num), (base)) \ : sizeof(*(p)) == 4 ? hash64_stable_32((p), (num), (base)) \ : sizeof(*(p)) == 2 ? hash64_stable_16((p), (num), (base)) \ : hash64_stable_8((p), (num), (base))) /** * hashl - fast 32/64-bit hash of an array for internal use * @p: the array or pointer to first element * @num: the number of elements to hash * @base: the base number to roll into the hash (usually 0) * * This is either hash() or hash64(), on 32/64 bit long machines. */ #define hashl(p, num, base) \ (BUILD_ASSERT_OR_ZERO(sizeof(long) == sizeof(uint32_t) \ || sizeof(long) == sizeof(uint64_t)) + \ (sizeof(long) == sizeof(uint64_t) \ ? hash64((p), (num), (base)) : hash((p), (num), (base)))) /* Our underlying operations. */ uint32_t hash_any(const void *key, size_t length, uint32_t base); uint32_t hash_stable_64(const void *key, size_t n, uint32_t base); uint32_t hash_stable_32(const void *key, size_t n, uint32_t base); uint32_t hash_stable_16(const void *key, size_t n, uint32_t base); uint32_t hash_stable_8(const void *key, size_t n, uint32_t base); uint64_t hash64_any(const void *key, size_t length, uint64_t base); uint64_t hash64_stable_64(const void *key, size_t n, uint64_t base); uint64_t hash64_stable_32(const void *key, size_t n, uint64_t base); uint64_t hash64_stable_16(const void *key, size_t n, uint64_t base); uint64_t hash64_stable_8(const void *key, size_t n, uint64_t base); /** * hash_pointer - hash a pointer for internal use * @p: the pointer value to hash * @base: the base number to roll into the hash (usually 0) * * The pointer p (not what p points to!) is combined with the base to form * a 32-bit hash. * * This hash will have different results on different machines, so is * only useful for internal hashes (ie. not hashes sent across the * network or saved to disk). * * Example: * #include * * // Code to keep track of memory regions. * struct region { * struct region *chain; * void *start; * unsigned int size; * }; * // We keep a simple hash table. 
* static struct region *region_hash[128]; * * static void add_region(struct region *r) * { * unsigned int h = hash_pointer(r->start, 0); * * r->chain = region_hash[h]; * region_hash[h] = r->chain; * } * * static struct region *find_region(const void *start) * { * struct region *r; * * for (r = region_hash[hash_pointer(start, 0)]; r; r = r->chain) * if (r->start == start) * return r; * return NULL; * } */ static inline uint32_t hash_pointer(const void *p, uint32_t base) { if (sizeof(p) % sizeof(uint32_t) == 0) { /* This convoluted union is the right way of aliasing. */ union { uint32_t u32[sizeof(p) / sizeof(uint32_t)]; const void *p; } u; u.p = p; return hash_u32(u.u32, sizeof(p) / sizeof(uint32_t), base); } else return hash(&p, 1, base); } #endif /* HASH_H */ ntdb-1.0/lib/ccan/hash/test/000077500000000000000000000000001224151530700156555ustar00rootroot00000000000000ntdb-1.0/lib/ccan/hash/test/api-hash_stable.c000066400000000000000000000451211224151530700210500ustar00rootroot00000000000000#include #include #include #include #define ARRAY_WORDS 5 int main(int argc, char *argv[]) { unsigned int i; uint8_t u8array[ARRAY_WORDS]; uint16_t u16array[ARRAY_WORDS]; uint32_t u32array[ARRAY_WORDS]; uint64_t u64array[ARRAY_WORDS]; /* Initialize arrays. */ for (i = 0; i < ARRAY_WORDS; i++) { u8array[i] = i; u16array[i] = i; u32array[i] = i; u64array[i] = i; } plan_tests(264); /* hash_stable is API-guaranteed. 
*/ ok1(hash_stable(u8array, ARRAY_WORDS, 0) == 0x1d4833cc); ok1(hash_stable(u8array, ARRAY_WORDS, 1) == 0x37125e2 ); ok1(hash_stable(u8array, ARRAY_WORDS, 2) == 0x330a007a); ok1(hash_stable(u8array, ARRAY_WORDS, 4) == 0x7b0df29b); ok1(hash_stable(u8array, ARRAY_WORDS, 8) == 0xe7e5d741); ok1(hash_stable(u8array, ARRAY_WORDS, 16) == 0xaae57471); ok1(hash_stable(u8array, ARRAY_WORDS, 32) == 0xc55399e5); ok1(hash_stable(u8array, ARRAY_WORDS, 64) == 0x67f21f7 ); ok1(hash_stable(u8array, ARRAY_WORDS, 128) == 0x1d795b71); ok1(hash_stable(u8array, ARRAY_WORDS, 256) == 0xeb961671); ok1(hash_stable(u8array, ARRAY_WORDS, 512) == 0xc2597247); ok1(hash_stable(u8array, ARRAY_WORDS, 1024) == 0x3f5c4d75); ok1(hash_stable(u8array, ARRAY_WORDS, 2048) == 0xe65cf4f9); ok1(hash_stable(u8array, ARRAY_WORDS, 4096) == 0xf2cd06cb); ok1(hash_stable(u8array, ARRAY_WORDS, 8192) == 0x443041e1); ok1(hash_stable(u8array, ARRAY_WORDS, 16384) == 0xdfc618f5); ok1(hash_stable(u8array, ARRAY_WORDS, 32768) == 0x5e3d5b97); ok1(hash_stable(u8array, ARRAY_WORDS, 65536) == 0xd5f64730); ok1(hash_stable(u8array, ARRAY_WORDS, 131072) == 0x372bbecc); ok1(hash_stable(u8array, ARRAY_WORDS, 262144) == 0x7c194c8d); ok1(hash_stable(u8array, ARRAY_WORDS, 524288) == 0x16cbb416); ok1(hash_stable(u8array, ARRAY_WORDS, 1048576) == 0x53e99222); ok1(hash_stable(u8array, ARRAY_WORDS, 2097152) == 0x6394554a); ok1(hash_stable(u8array, ARRAY_WORDS, 4194304) == 0xd83a506d); ok1(hash_stable(u8array, ARRAY_WORDS, 8388608) == 0x7619d9a4); ok1(hash_stable(u8array, ARRAY_WORDS, 16777216) == 0xfe98e5f6); ok1(hash_stable(u8array, ARRAY_WORDS, 33554432) == 0x6c262927); ok1(hash_stable(u8array, ARRAY_WORDS, 67108864) == 0x3f0106fd); ok1(hash_stable(u8array, ARRAY_WORDS, 134217728) == 0xc91e3a28); ok1(hash_stable(u8array, ARRAY_WORDS, 268435456) == 0x14229579); ok1(hash_stable(u8array, ARRAY_WORDS, 536870912) == 0x9dbefa76); ok1(hash_stable(u8array, ARRAY_WORDS, 1073741824) == 0xb05c0c78); ok1(hash_stable(u8array, ARRAY_WORDS, 
2147483648U) == 0x88f24d81); ok1(hash_stable(u16array, ARRAY_WORDS, 0) == 0xecb5f507); ok1(hash_stable(u16array, ARRAY_WORDS, 1) == 0xadd666e6); ok1(hash_stable(u16array, ARRAY_WORDS, 2) == 0xea0f214c); ok1(hash_stable(u16array, ARRAY_WORDS, 4) == 0xae4051ba); ok1(hash_stable(u16array, ARRAY_WORDS, 8) == 0x6ed28026); ok1(hash_stable(u16array, ARRAY_WORDS, 16) == 0xa3917a19); ok1(hash_stable(u16array, ARRAY_WORDS, 32) == 0xf370f32b); ok1(hash_stable(u16array, ARRAY_WORDS, 64) == 0x807af460); ok1(hash_stable(u16array, ARRAY_WORDS, 128) == 0xb4c8cd83); ok1(hash_stable(u16array, ARRAY_WORDS, 256) == 0xa10cb5b0); ok1(hash_stable(u16array, ARRAY_WORDS, 512) == 0x8b7d7387); ok1(hash_stable(u16array, ARRAY_WORDS, 1024) == 0x9e49d1c ); ok1(hash_stable(u16array, ARRAY_WORDS, 2048) == 0x288830d1); ok1(hash_stable(u16array, ARRAY_WORDS, 4096) == 0xbe078a43); ok1(hash_stable(u16array, ARRAY_WORDS, 8192) == 0xa16d5d88); ok1(hash_stable(u16array, ARRAY_WORDS, 16384) == 0x46839fcd); ok1(hash_stable(u16array, ARRAY_WORDS, 32768) == 0x9db9bd4f); ok1(hash_stable(u16array, ARRAY_WORDS, 65536) == 0xedff58f8); ok1(hash_stable(u16array, ARRAY_WORDS, 131072) == 0x95ecef18); ok1(hash_stable(u16array, ARRAY_WORDS, 262144) == 0x23c31b7d); ok1(hash_stable(u16array, ARRAY_WORDS, 524288) == 0x1d85c7d0); ok1(hash_stable(u16array, ARRAY_WORDS, 1048576) == 0x25218842); ok1(hash_stable(u16array, ARRAY_WORDS, 2097152) == 0x711d985c); ok1(hash_stable(u16array, ARRAY_WORDS, 4194304) == 0x85470eca); ok1(hash_stable(u16array, ARRAY_WORDS, 8388608) == 0x99ed4ceb); ok1(hash_stable(u16array, ARRAY_WORDS, 16777216) == 0x67b3710c); ok1(hash_stable(u16array, ARRAY_WORDS, 33554432) == 0x77f1ab35); ok1(hash_stable(u16array, ARRAY_WORDS, 67108864) == 0x81f688aa); ok1(hash_stable(u16array, ARRAY_WORDS, 134217728) == 0x27b56ca5); ok1(hash_stable(u16array, ARRAY_WORDS, 268435456) == 0xf21ba203); ok1(hash_stable(u16array, ARRAY_WORDS, 536870912) == 0xd48d1d1 ); ok1(hash_stable(u16array, ARRAY_WORDS, 1073741824) == 
0xa542b62d); ok1(hash_stable(u16array, ARRAY_WORDS, 2147483648U) == 0xa04c7058); ok1(hash_stable(u32array, ARRAY_WORDS, 0) == 0x13305f8c); ok1(hash_stable(u32array, ARRAY_WORDS, 1) == 0x171abf74); ok1(hash_stable(u32array, ARRAY_WORDS, 2) == 0x7646fcc7); ok1(hash_stable(u32array, ARRAY_WORDS, 4) == 0xa758ed5); ok1(hash_stable(u32array, ARRAY_WORDS, 8) == 0x2dedc2e4); ok1(hash_stable(u32array, ARRAY_WORDS, 16) == 0x28e2076b); ok1(hash_stable(u32array, ARRAY_WORDS, 32) == 0xb73091c5); ok1(hash_stable(u32array, ARRAY_WORDS, 64) == 0x87daf5db); ok1(hash_stable(u32array, ARRAY_WORDS, 128) == 0xa16dfe20); ok1(hash_stable(u32array, ARRAY_WORDS, 256) == 0x300c63c3); ok1(hash_stable(u32array, ARRAY_WORDS, 512) == 0x255c91fc); ok1(hash_stable(u32array, ARRAY_WORDS, 1024) == 0x6357b26); ok1(hash_stable(u32array, ARRAY_WORDS, 2048) == 0x4bc5f339); ok1(hash_stable(u32array, ARRAY_WORDS, 4096) == 0x1301617c); ok1(hash_stable(u32array, ARRAY_WORDS, 8192) == 0x506792c9); ok1(hash_stable(u32array, ARRAY_WORDS, 16384) == 0xcd596705); ok1(hash_stable(u32array, ARRAY_WORDS, 32768) == 0xa8713cac); ok1(hash_stable(u32array, ARRAY_WORDS, 65536) == 0x94d9794); ok1(hash_stable(u32array, ARRAY_WORDS, 131072) == 0xac753e8); ok1(hash_stable(u32array, ARRAY_WORDS, 262144) == 0xcd8bdd20); ok1(hash_stable(u32array, ARRAY_WORDS, 524288) == 0xd44faf80); ok1(hash_stable(u32array, ARRAY_WORDS, 1048576) == 0x2547ccbe); ok1(hash_stable(u32array, ARRAY_WORDS, 2097152) == 0xbab06dbc); ok1(hash_stable(u32array, ARRAY_WORDS, 4194304) == 0xaac0e882); ok1(hash_stable(u32array, ARRAY_WORDS, 8388608) == 0x443f48d0); ok1(hash_stable(u32array, ARRAY_WORDS, 16777216) == 0xdff49fcc); ok1(hash_stable(u32array, ARRAY_WORDS, 33554432) == 0x9ce0fd65); ok1(hash_stable(u32array, ARRAY_WORDS, 67108864) == 0x9ddb1def); ok1(hash_stable(u32array, ARRAY_WORDS, 134217728) == 0x86096f25); ok1(hash_stable(u32array, ARRAY_WORDS, 268435456) == 0xe713b7b5); ok1(hash_stable(u32array, ARRAY_WORDS, 536870912) == 0x5baeffc5); 
ok1(hash_stable(u32array, ARRAY_WORDS, 1073741824) == 0xde874f52); ok1(hash_stable(u32array, ARRAY_WORDS, 2147483648U) == 0xeca13b4e); ok1(hash_stable(u64array, ARRAY_WORDS, 0) == 0x12ef6302); ok1(hash_stable(u64array, ARRAY_WORDS, 1) == 0xe9aeb406); ok1(hash_stable(u64array, ARRAY_WORDS, 2) == 0xc4218ceb); ok1(hash_stable(u64array, ARRAY_WORDS, 4) == 0xb3d11412); ok1(hash_stable(u64array, ARRAY_WORDS, 8) == 0xdafbd654); ok1(hash_stable(u64array, ARRAY_WORDS, 16) == 0x9c336cba); ok1(hash_stable(u64array, ARRAY_WORDS, 32) == 0x65059721); ok1(hash_stable(u64array, ARRAY_WORDS, 64) == 0x95b5bbe6); ok1(hash_stable(u64array, ARRAY_WORDS, 128) == 0xe7596b84); ok1(hash_stable(u64array, ARRAY_WORDS, 256) == 0x503622a2); ok1(hash_stable(u64array, ARRAY_WORDS, 512) == 0xecdcc5ca); ok1(hash_stable(u64array, ARRAY_WORDS, 1024) == 0xc40d0513); ok1(hash_stable(u64array, ARRAY_WORDS, 2048) == 0xaab25e4d); ok1(hash_stable(u64array, ARRAY_WORDS, 4096) == 0xcc353fb9); ok1(hash_stable(u64array, ARRAY_WORDS, 8192) == 0x18e2319f); ok1(hash_stable(u64array, ARRAY_WORDS, 16384) == 0xfddaae8d); ok1(hash_stable(u64array, ARRAY_WORDS, 32768) == 0xef7976f2); ok1(hash_stable(u64array, ARRAY_WORDS, 65536) == 0x86359fc9); ok1(hash_stable(u64array, ARRAY_WORDS, 131072) == 0x8b5af385); ok1(hash_stable(u64array, ARRAY_WORDS, 262144) == 0x80d4ee31); ok1(hash_stable(u64array, ARRAY_WORDS, 524288) == 0x42f5f85b); ok1(hash_stable(u64array, ARRAY_WORDS, 1048576) == 0x9a6920e1); ok1(hash_stable(u64array, ARRAY_WORDS, 2097152) == 0x7b7c9850); ok1(hash_stable(u64array, ARRAY_WORDS, 4194304) == 0x69573e09); ok1(hash_stable(u64array, ARRAY_WORDS, 8388608) == 0xc942bc0e); ok1(hash_stable(u64array, ARRAY_WORDS, 16777216) == 0x7a89f0f1); ok1(hash_stable(u64array, ARRAY_WORDS, 33554432) == 0x2dd641ca); ok1(hash_stable(u64array, ARRAY_WORDS, 67108864) == 0x89bbd391); ok1(hash_stable(u64array, ARRAY_WORDS, 134217728) == 0xbcf88e31); ok1(hash_stable(u64array, ARRAY_WORDS, 268435456) == 0xfa7a3460); 
ok1(hash_stable(u64array, ARRAY_WORDS, 536870912) == 0x49a37be0); ok1(hash_stable(u64array, ARRAY_WORDS, 1073741824) == 0x1b346394); ok1(hash_stable(u64array, ARRAY_WORDS, 2147483648U) == 0x6c3a1592); ok1(hash64_stable(u8array, ARRAY_WORDS, 0) == 16887282882572727244ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 1) == 12032777473133454818ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 2) == 18183407363221487738ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 4) == 17860764172704150171ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 8) == 18076051600675559233ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 16) == 9909361918431556721ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 32) == 12937969888744675813ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 64) == 5245669057381736951ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 128) == 4376874646406519665ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 256) == 14219974419871569521ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 512) == 2263415354134458951ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 1024) == 4953859694526221685ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 2048) == 3432228642067641593ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 4096) == 1219647244417697483ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 8192) == 7629939424585859553ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 16384) == 10041660531376789749ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 32768) == 13859885793922603927ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 65536) == 15069060338344675120ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 131072) == 818163430835601100ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 262144) == 14914314323019517069ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 524288) == 17518437749769352214ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 1048576) == 14920048004901212706ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 2097152) == 8758567366332536138ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 4194304) == 6226655736088907885ULL); 
ok1(hash64_stable(u8array, ARRAY_WORDS, 8388608) == 13716650013685832100ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 16777216) == 305325651636315638ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 33554432) == 16784147606583781671ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 67108864) == 16509467555140798205ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 134217728) == 8717281234694060584ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 268435456) == 8098476701725660537ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 536870912) == 16345871539461094006ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 1073741824) == 3755557000429964408ULL); ok1(hash64_stable(u8array, ARRAY_WORDS, 2147483648U) == 15017348801959710081ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 0) == 1038028831307724039ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 1) == 10155473272642627302ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 2) == 5714751190106841420ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 4) == 3923885607767527866ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 8) == 3931017318293995558ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 16) == 1469696588339313177ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 32) == 11522218526952715051ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 64) == 6953517591561958496ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 128) == 7406689491740052867ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 256) == 10101844489704093104ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 512) == 12511348870707245959ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 1024) == 1614019938016861468ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 2048) == 5294796182374592721ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 4096) == 16089570706643716675ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 8192) == 1689302638424579464ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 16384) == 1446340172370386893ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 32768) == 16535503506744393039ULL); ok1(hash64_stable(u16array, 
ARRAY_WORDS, 65536) == 3496794142527150328ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 131072) == 6568245367474548504ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 262144) == 9487676460765485949ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 524288) == 4519762130966530000ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 1048576) == 15623412069215340610ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 2097152) == 544013388676438108ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 4194304) == 5594904760290840266ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 8388608) == 18098755780041592043ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 16777216) == 6389168672387330316ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 33554432) == 896986127732419381ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 67108864) == 13232626471143901354ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 134217728) == 53378562890493093ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 268435456) == 10072361400297824771ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 536870912) == 14511948118285144529ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 1073741824) == 6981033484844447277ULL); ok1(hash64_stable(u16array, ARRAY_WORDS, 2147483648U) == 5619339091684126808ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 0) == 3037571077312110476ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 1) == 14732398743825071988ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 2) == 14949132158206672071ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 4) == 1291370080511561429ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 8) == 10792665964172133092ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 16) == 14250138032054339435ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 32) == 17136741522078732741ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 64) == 3260193403318236635ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 128) == 10526616652205653536ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 256) == 9019690373358576579ULL); ok1(hash64_stable(u32array, 
ARRAY_WORDS, 512) == 6997491436599677436ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 1024) == 18302783371416533798ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 2048) == 10149320644446516025ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 4096) == 7073759949410623868ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 8192) == 17442399482223760073ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 16384) == 2983906194216281861ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 32768) == 4975845419129060524ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 65536) == 594019910205413268ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 131072) == 11903010186073691112ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 262144) == 7339636527154847008ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 524288) == 15243305400579108736ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 1048576) == 16737926245392043198ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 2097152) == 15725083267699862972ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 4194304) == 12527834265678833794ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 8388608) == 13908436455987824848ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 16777216) == 9672773345173872588ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 33554432) == 2305314279896710501ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 67108864) == 1866733780381408751ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 134217728) == 11906263969465724709ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 268435456) == 5501594918093830069ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 536870912) == 15823785789276225477ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 1073741824) == 17353000723889475410ULL); ok1(hash64_stable(u32array, ARRAY_WORDS, 2147483648U) == 7494736910655503182ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 0) == 9765419389786481410ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 1) == 11182806172127114246ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 2) == 2559155171395472619ULL); 
ok1(hash64_stable(u64array, ARRAY_WORDS, 4) == 3311692033324815378ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 8) == 1297175419505333844ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 16) == 617896928653569210ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 32) == 1517398559958603553ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 64) == 4504821917445110758ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 128) == 1971743331114904452ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 256) == 6177667912354374306ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 512) == 15570521289777792458ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 1024) == 9204559632415917331ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 2048) == 9008982669760028237ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 4096) == 14803537660281700281ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 8192) == 2873966517448487327ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 16384) == 5859277625928363661ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 32768) == 15520461285618185970ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 65536) == 16746489793331175369ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 131072) == 514952025484227461ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 262144) == 10867212269810675249ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 524288) == 9822204377278314587ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 1048576) == 3295088921987850465ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 2097152) == 7559197431498053712ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 4194304) == 1667267269116771849ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 8388608) == 2916804068951374862ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 16777216) == 14422558383125688561ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 33554432) == 10083112683694342602ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 67108864) == 7222777647078298513ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 134217728) == 18424513674048212529ULL); 
ok1(hash64_stable(u64array, ARRAY_WORDS, 268435456) == 14913668581101810784ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 536870912) == 14377721174297902048ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 1073741824) == 6031715005667500948ULL); ok1(hash64_stable(u64array, ARRAY_WORDS, 2147483648U) == 4827100319722378642ULL); return exit_status(); } ntdb-1.0/lib/ccan/hash/test/run.c000066400000000000000000000074751224151530700166420ustar00rootroot00000000000000#include #include #include #include #include #define ARRAY_WORDS 5 int main(int argc, char *argv[]) { unsigned int i, j, k; uint32_t array[ARRAY_WORDS], val; char array2[sizeof(array) + sizeof(uint32_t)]; uint32_t results[256]; /* Initialize array. */ for (i = 0; i < ARRAY_WORDS; i++) array[i] = i; plan_tests(39); /* Hash should be the same, indep of memory alignment. */ val = hash(array, ARRAY_WORDS, 0); for (i = 0; i < sizeof(uint32_t); i++) { memcpy(array2 + i, array, sizeof(array)); ok(hash(array2 + i, ARRAY_WORDS, 0) != val, "hash matched at offset %i", i); } /* Hash of random values should have random distribution: * check one byte at a time. */ for (i = 0; i < sizeof(uint32_t); i++) { unsigned int lowest = -1U, highest = 0; memset(results, 0, sizeof(results)); for (j = 0; j < 256000; j++) { for (k = 0; k < ARRAY_WORDS; k++) array[k] = random(); results[(hash(array, ARRAY_WORDS, 0) >> i*8)&0xFF]++; } for (j = 0; j < 256; j++) { if (results[j] < lowest) lowest = results[j]; if (results[j] > highest) highest = results[j]; } /* Expect within 20% */ ok(lowest > 800, "Byte %i lowest %i", i, lowest); ok(highest < 1200, "Byte %i highest %i", i, highest); diag("Byte %i, range %u-%u", i, lowest, highest); } /* Hash of random values should have random distribution: * check one byte at a time. 
*/ for (i = 0; i < sizeof(uint64_t); i++) { unsigned int lowest = -1U, highest = 0; memset(results, 0, sizeof(results)); for (j = 0; j < 256000; j++) { for (k = 0; k < ARRAY_WORDS; k++) array[k] = random(); results[(hash64(array, sizeof(array)/sizeof(uint64_t), 0) >> i*8)&0xFF]++; } for (j = 0; j < 256; j++) { if (results[j] < lowest) lowest = results[j]; if (results[j] > highest) highest = results[j]; } /* Expect within 20% */ ok(lowest > 800, "Byte %i lowest %i", i, lowest); ok(highest < 1200, "Byte %i highest %i", i, highest); diag("Byte %i, range %u-%u", i, lowest, highest); } /* Hash of pointer values should also have random distribution. */ for (i = 0; i < sizeof(uint32_t); i++) { unsigned int lowest = -1U, highest = 0; char *p = malloc(256000); memset(results, 0, sizeof(results)); for (j = 0; j < 256000; j++) results[(hash_pointer(p + j, 0) >> i*8)&0xFF]++; free(p); for (j = 0; j < 256; j++) { if (results[j] < lowest) lowest = results[j]; if (results[j] > highest) highest = results[j]; } /* Expect within 20% */ ok(lowest > 800, "hash_pointer byte %i lowest %i", i, lowest); ok(highest < 1200, "hash_pointer byte %i highest %i", i, highest); diag("hash_pointer byte %i, range %u-%u", i, lowest, highest); } if (sizeof(long) == sizeof(uint32_t)) ok1(hashl(array, ARRAY_WORDS, 0) == hash(array, ARRAY_WORDS, 0)); else ok1(hashl(array, ARRAY_WORDS, 0) == hash64(array, ARRAY_WORDS, 0)); /* String hash: weak, so only test bottom byte */ for (i = 0; i < 1; i++) { unsigned int num = 0, cursor, lowest = -1U, highest = 0; char p[5]; memset(results, 0, sizeof(results)); memset(p, 'A', sizeof(p)); p[sizeof(p)-1] = '\0'; for (;;) { for (cursor = 0; cursor < sizeof(p)-1; cursor++) { p[cursor]++; if (p[cursor] <= 'z') break; p[cursor] = 'A'; } if (cursor == sizeof(p)-1) break; results[(hash_string(p) >> i*8)&0xFF]++; num++; } for (j = 0; j < 256; j++) { if (results[j] < lowest) lowest = results[j]; if (results[j] > highest) highest = results[j]; } /* Expect within 20% */ 
ok(lowest > 35000, "hash_pointer byte %i lowest %i", i, lowest); ok(highest < 53000, "hash_pointer byte %i highest %i", i, highest); diag("hash_pointer byte %i, range %u-%u", i, lowest, highest); } return exit_status(); } ntdb-1.0/lib/ccan/htable/000077500000000000000000000000001224151530700152125ustar00rootroot00000000000000ntdb-1.0/lib/ccan/htable/LICENSE000066400000000000000000000636371224151530700162360ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. 
Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. 
For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. 
For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. 
d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. 
However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. 
If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. 
For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. 
You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. 
It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. 
If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. 
You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! ntdb-1.0/lib/ccan/htable/_info000066400000000000000000000052301224151530700162270ustar00rootroot00000000000000#include #include /** * htable - hash table routines * * A hash table is an efficient structure for looking up keys. This version * grows with usage and allows efficient deletion. 
* * Example: * #include * #include * #include * #include * #include * * struct name_to_digit { * const char *name; * unsigned int val; * }; * * static struct name_to_digit map[] = { * { "zero", 0}, * { "one", 1 }, * { "two", 2 }, * { "three", 3 }, * { "four", 4 }, * { "five", 5 }, * { "six", 6 }, * { "seven", 7 }, * { "eight", 8 }, * { "nine", 9 } * }; * * // Wrapper for rehash function pointer. * static size_t rehash(const void *e, void *unused) * { * return hash_string(((struct name_to_digit *)e)->name); * } * * // Comparison function. * static bool streq(const void *e, void *string) * { * return strcmp(((struct name_to_digit *)e)->name, string) == 0; * } * * // We let them add their own aliases, eg. --alias=v=5 * static void add_alias(struct htable *ht, const char *alias) * { * char *eq; * struct name_to_digit *n; * * n = malloc(sizeof(*n)); * n->name = strdup(alias); * * eq = strchr(n->name, '='); * if (!eq || ((n->val = atoi(eq+1)) == 0 && !strcmp(eq+1, "0"))) * errx(1, "Usage: --alias=="); * *eq = '\0'; * htable_add(ht, hash_string(n->name), n); * } * * int main(int argc, char *argv[]) * { * struct htable ht; * unsigned int i; * unsigned long val; * * if (argc < 2) * errx(1, "Usage: %s [--alias==]... ...", * argv[0]); * * // Create and populate hash table. * htable_init(&ht, rehash, NULL); * for (i = 0; i < sizeof(map)/sizeof(map[0]); i++) * htable_add(&ht, hash_string(map[i].name), &map[i]); * * // Add any aliases to the hash table. * for (i = 1; i < argc; i++) { * if (!strncmp(argv[i], "--alias=", strlen("--alias="))) * add_alias(&ht, argv[i] + strlen("--alias=")); * else * break; * } * * // Find the other args in the hash table. * for (val = 0; i < argc; i++) { * struct name_to_digit *n; * n = htable_get(&ht, hash_string(argv[i]), * streq, argv[i]); * if (!n) * errx(1, "Invalid digit name %s", argv[i]); * // Append it to the value we are building up. 
* val *= 10; * val += n->val; * } * printf("%lu\n", val); * return 0; * } * * License: LGPL (v2.1 or any later version) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/compiler\n"); return 0; } return 1; } ntdb-1.0/lib/ccan/htable/htable.c000066400000000000000000000145141224151530700166220ustar00rootroot00000000000000/* Licensed under LGPLv2+ - see LICENSE file for details */ #include #include #include #include #include #include /* We use 0x1 as deleted marker. */ #define HTABLE_DELETED (0x1) /* We clear out the bits which are always the same, and put metadata there. */ static inline uintptr_t get_extra_ptr_bits(const struct htable *ht, uintptr_t e) { return e & ht->common_mask; } static inline void *get_raw_ptr(const struct htable *ht, uintptr_t e) { return (void *)((e & ~ht->common_mask) | ht->common_bits); } static inline uintptr_t make_hval(const struct htable *ht, const void *p, uintptr_t bits) { return ((uintptr_t)p & ~ht->common_mask) | bits; } static inline bool entry_is_valid(uintptr_t e) { return e > HTABLE_DELETED; } static inline uintptr_t get_hash_ptr_bits(const struct htable *ht, size_t hash) { /* Shuffling the extra bits (as specified in mask) down the * end is quite expensive. But the lower bits are redundant, so * we fold the value first. 
*/ return (hash ^ (hash >> ht->bits)) & ht->common_mask & ~ht->perfect_bit; } void htable_init(struct htable *ht, size_t (*rehash)(const void *elem, void *priv), void *priv) { struct htable empty = HTABLE_INITIALIZER(empty, NULL, NULL); *ht = empty; ht->rehash = rehash; ht->priv = priv; ht->table = &ht->perfect_bit; } void htable_clear(struct htable *ht) { if (ht->table != &ht->perfect_bit) free((void *)ht->table); htable_init(ht, ht->rehash, ht->priv); } static size_t hash_bucket(const struct htable *ht, size_t h) { return h & ((1 << ht->bits)-1); } static void *htable_val(const struct htable *ht, struct htable_iter *i, size_t hash, uintptr_t perfect) { uintptr_t h2 = get_hash_ptr_bits(ht, hash) | perfect; while (ht->table[i->off]) { if (ht->table[i->off] != HTABLE_DELETED) { if (get_extra_ptr_bits(ht, ht->table[i->off]) == h2) return get_raw_ptr(ht, ht->table[i->off]); } i->off = (i->off + 1) & ((1 << ht->bits)-1); h2 &= ~perfect; } return NULL; } void *htable_firstval(const struct htable *ht, struct htable_iter *i, size_t hash) { i->off = hash_bucket(ht, hash); return htable_val(ht, i, hash, ht->perfect_bit); } void *htable_nextval(const struct htable *ht, struct htable_iter *i, size_t hash) { i->off = (i->off + 1) & ((1 << ht->bits)-1); return htable_val(ht, i, hash, 0); } void *htable_first(const struct htable *ht, struct htable_iter *i) { for (i->off = 0; i->off < (size_t)1 << ht->bits; i->off++) { if (entry_is_valid(ht->table[i->off])) return get_raw_ptr(ht, ht->table[i->off]); } return NULL; } void *htable_next(const struct htable *ht, struct htable_iter *i) { for (i->off++; i->off < (size_t)1 << ht->bits; i->off++) { if (entry_is_valid(ht->table[i->off])) return get_raw_ptr(ht, ht->table[i->off]); } return NULL; } /* This does not expand the hash table, that's up to caller. 
*/ static void ht_add(struct htable *ht, const void *new, size_t h) { size_t i; uintptr_t perfect = ht->perfect_bit; i = hash_bucket(ht, h); while (entry_is_valid(ht->table[i])) { perfect = 0; i = (i + 1) & ((1 << ht->bits)-1); } ht->table[i] = make_hval(ht, new, get_hash_ptr_bits(ht, h)|perfect); } static COLD bool double_table(struct htable *ht) { unsigned int i; size_t oldnum = (size_t)1 << ht->bits; uintptr_t *oldtable, e; oldtable = ht->table; ht->table = calloc(1 << (ht->bits+1), sizeof(size_t)); if (!ht->table) { ht->table = oldtable; return false; } ht->bits++; ht->max = ((size_t)3 << ht->bits) / 4; ht->max_with_deleted = ((size_t)9 << ht->bits) / 10; /* If we lost our "perfect bit", get it back now. */ if (!ht->perfect_bit && ht->common_mask) { for (i = 0; i < sizeof(ht->common_mask) * CHAR_BIT; i++) { if (ht->common_mask & ((size_t)1 << i)) { ht->perfect_bit = (size_t)1 << i; break; } } } if (oldtable != &ht->perfect_bit) { for (i = 0; i < oldnum; i++) { if (entry_is_valid(e = oldtable[i])) { void *p = get_raw_ptr(ht, e); ht_add(ht, p, ht->rehash(p, ht->priv)); } } free(oldtable); } ht->deleted = 0; return true; } static COLD void rehash_table(struct htable *ht) { size_t start, i; uintptr_t e; /* Beware wrap cases: we need to start from first empty bucket. */ for (start = 0; ht->table[start]; start++); for (i = 0; i < (size_t)1 << ht->bits; i++) { size_t h = (i + start) & ((1 << ht->bits)-1); e = ht->table[h]; if (!e) continue; if (e == HTABLE_DELETED) ht->table[h] = 0; else if (!(e & ht->perfect_bit)) { void *p = get_raw_ptr(ht, e); ht->table[h] = 0; ht_add(ht, p, ht->rehash(p, ht->priv)); } } ht->deleted = 0; } /* We stole some bits, now we need to put them back... */ static COLD void update_common(struct htable *ht, const void *p) { unsigned int i; uintptr_t maskdiff, bitsdiff; if (ht->elems == 0) { ht->common_mask = -1; ht->common_bits = (uintptr_t)p; ht->perfect_bit = 1; return; } /* Find bits which are unequal to old common set. 
*/ maskdiff = ht->common_bits ^ ((uintptr_t)p & ht->common_mask); /* These are the bits which go there in existing entries. */ bitsdiff = ht->common_bits & maskdiff; for (i = 0; i < (size_t)1 << ht->bits; i++) { if (!entry_is_valid(ht->table[i])) continue; /* Clear the bits no longer in the mask, set them as * expected. */ ht->table[i] &= ~maskdiff; ht->table[i] |= bitsdiff; } /* Take away those bits from our mask, bits and perfect bit. */ ht->common_mask &= ~maskdiff; ht->common_bits &= ~maskdiff; ht->perfect_bit &= ~maskdiff; } bool htable_add(struct htable *ht, size_t hash, const void *p) { if (ht->elems+1 > ht->max && !double_table(ht)) return false; if (ht->elems+1 + ht->deleted > ht->max_with_deleted) rehash_table(ht); assert(p); if (((uintptr_t)p & ht->common_mask) != ht->common_bits) update_common(ht, p); ht_add(ht, p, hash); ht->elems++; return true; } bool htable_del(struct htable *ht, size_t h, const void *p) { struct htable_iter i; void *c; for (c = htable_firstval(ht,&i,h); c; c = htable_nextval(ht,&i,h)) { if (c == p) { htable_delval(ht, &i); return true; } } return false; } void htable_delval(struct htable *ht, struct htable_iter *i) { assert(i->off < (size_t)1 << ht->bits); assert(entry_is_valid(ht->table[i->off])); ht->elems--; ht->table[i->off] = HTABLE_DELETED; ht->deleted++; } ntdb-1.0/lib/ccan/htable/htable.h000066400000000000000000000112761224151530700166310ustar00rootroot00000000000000/* Licensed under LGPLv2+ - see LICENSE file for details */ #ifndef CCAN_HTABLE_H #define CCAN_HTABLE_H #include "config.h" #include #include #include /** * struct htable - private definition of a htable. * * It's exposed here so you can put it in your structures and so we can * supply inline functions. */ struct htable { size_t (*rehash)(const void *elem, void *priv); void *priv; unsigned int bits; size_t elems, deleted, max, max_with_deleted; /* These are the bits which are the same in all pointers. 
*/ uintptr_t common_mask, common_bits; uintptr_t perfect_bit; uintptr_t *table; }; /** * HTABLE_INITIALIZER - static initialization for a hash table. * @name: name of this htable. * @rehash: hash function to use for rehashing. * @priv: private argument to @rehash function. * * This is useful for setting up static and global hash tables. * * Example: * // For simplicity's sake, say hash value is contents of elem. * static size_t rehash(const void *elem, void *unused) * { * return *(size_t *)elem; * } * static struct htable ht = HTABLE_INITIALIZER(ht, rehash, NULL); */ #define HTABLE_INITIALIZER(name, rehash, priv) \ { rehash, priv, 0, 0, 0, 0, 0, -1, 0, 0, &name.perfect_bit } /** * htable_init - initialize an empty hash table. * @ht: the hash table to initialize * @rehash: hash function to use for rehashing. * @priv: private argument to @rehash function. */ void htable_init(struct htable *ht, size_t (*rehash)(const void *elem, void *priv), void *priv); /** * htable_clear - empty a hash table. * @ht: the hash table to clear * * This doesn't do anything to any pointers left in it. */ void htable_clear(struct htable *ht); /** * htable_rehash - use a hashtree's rehash function * @elem: the argument to rehash() * */ size_t htable_rehash(const void *elem); /** * htable_add - add a pointer into a hash table. * @ht: the htable * @hash: the hash value of the object * @p: the non-NULL pointer * * Also note that this can only fail due to allocation failure. Otherwise, it * returns true. */ bool htable_add(struct htable *ht, size_t hash, const void *p); /** * htable_del - remove a pointer from a hash table * @ht: the htable * @hash: the hash value of the object * @p: the pointer * * Returns true if the pointer was found (and deleted). */ bool htable_del(struct htable *ht, size_t hash, const void *p); /** * struct htable_iter - iterator or htable_first or htable_firstval etc. * * This refers to a location inside the hashtable. 
*/ struct htable_iter { size_t off; }; /** * htable_firstval - find a candidate for a given hash value * @htable: the hashtable * @i: the struct htable_iter to initialize * @hash: the hash value * * You'll need to check the value is what you want; returns NULL if none. * See Also: * htable_delval() */ void *htable_firstval(const struct htable *htable, struct htable_iter *i, size_t hash); /** * htable_nextval - find another candidate for a given hash value * @htable: the hashtable * @i: the struct htable_iter to initialize * @hash: the hash value * * You'll need to check the value is what you want; returns NULL if no more. */ void *htable_nextval(const struct htable *htable, struct htable_iter *i, size_t hash); /** * htable_get - find an entry in the hash table * @ht: the hashtable * @h: the hash value of the entry * @cmp: the comparison function * @ptr: the pointer to hand to the comparison function. * * Convenient inline wrapper for htable_firstval/htable_nextval loop. */ static inline void *htable_get(const struct htable *ht, size_t h, bool (*cmp)(const void *candidate, void *ptr), const void *ptr) { struct htable_iter i; void *c; for (c = htable_firstval(ht,&i,h); c; c = htable_nextval(ht,&i,h)) { if (cmp(c, (void *)ptr)) return c; } return NULL; } /** * htable_first - find an entry in the hash table * @ht: the hashtable * @i: the struct htable_iter to initialize * * Get an entry in the hashtable; NULL if empty. */ void *htable_first(const struct htable *htable, struct htable_iter *i); /** * htable_next - find another entry in the hash table * @ht: the hashtable * @i: the struct htable_iter to use * * Get another entry in the hashtable; NULL if all done. * This is usually used after htable_first or prior non-NULL htable_next. 
*/ void *htable_next(const struct htable *htable, struct htable_iter *i); /** * htable_delval - remove an iterated pointer from a hash table * @ht: the htable * @i: the htable_iter * * Usually used to delete a hash entry after it has been found with * htable_firstval etc. */ void htable_delval(struct htable *ht, struct htable_iter *i); #endif /* CCAN_HTABLE_H */ ntdb-1.0/lib/ccan/htable/htable_type.h000066400000000000000000000071061224151530700176670ustar00rootroot00000000000000/* Licensed under LGPLv2+ - see LICENSE file for details */ #ifndef CCAN_HTABLE_TYPE_H #define CCAN_HTABLE_TYPE_H #include #include "config.h" /** * HTABLE_DEFINE_TYPE - create a set of htable ops for a type * @type: a type whose pointers will be values in the hash. * @keyof: a function/macro to extract a key: @keyof(const type *elem) * @hashfn: a hash function for a @key: size_t @hashfn(const *) * @eqfn: an equality function keys: bool @eqfn(const type *, const *) * @prefix: a prefix for all the functions to define (of form _*) * * NULL values may not be placed into the hash table. * * This defines the type hashtable type and an iterator type: * struct ; * struct _iter; * * It also defines initialization and freeing functions: * void _init(struct *); * void _clear(struct *); * * Add function only fails if we run out of memory: * bool _add(struct *ht, const *e); * * Delete and delete-by key return true if it was in the set: * bool _del(struct *ht, const *e); * bool _delkey(struct *ht, const *k); * * Find function return the matching element, or NULL: * type *_get(const struct @name *ht, const *k); * * Iteration over hashtable is also supported: * type *_first(const struct *ht, struct _iter *i); * type *_next(const struct *ht, struct _iter *i); * * It's currently safe to iterate over a changing hashtable, but you might * miss an element. Iteration isn't very efficient, either. 
* * You can use HTABLE_INITIALIZER like so: * struct ht = { HTABLE_INITIALIZER(ht.raw, _hash, NULL) }; */ #define HTABLE_DEFINE_TYPE(type, keyof, hashfn, eqfn, name) \ struct name { struct htable raw; }; \ struct name##_iter { struct htable_iter i; }; \ static inline size_t name##_hash(const void *elem, void *priv) \ { \ return hashfn(keyof((const type *)elem)); \ } \ static inline void name##_init(struct name *ht) \ { \ htable_init(&ht->raw, name##_hash, NULL); \ } \ static inline void name##_clear(struct name *ht) \ { \ htable_clear(&ht->raw); \ } \ static inline bool name##_add(struct name *ht, const type *elem) \ { \ return htable_add(&ht->raw, hashfn(keyof(elem)), elem); \ } \ static inline bool name##_del(struct name *ht, const type *elem) \ { \ return htable_del(&ht->raw, hashfn(keyof(elem)), elem); \ } \ static inline type *name##_get(const struct name *ht, \ const HTABLE_KTYPE(keyof) k) \ { \ /* Typecheck for eqfn */ \ (void)sizeof(eqfn((const type *)NULL, \ keyof((const type *)NULL))); \ return htable_get(&ht->raw, \ hashfn(k), \ (bool (*)(const void *, void *))(eqfn), \ k); \ } \ static inline bool name##_delkey(struct name *ht, \ const HTABLE_KTYPE(keyof) k) \ { \ type *elem = name##_get(ht, k); \ if (elem) \ return name##_del(ht, elem); \ return false; \ } \ static inline type *name##_first(const struct name *ht, \ struct name##_iter *iter) \ { \ return htable_first(&ht->raw, &iter->i); \ } \ static inline type *name##_next(const struct name *ht, \ struct name##_iter *iter) \ { \ return htable_next(&ht->raw, &iter->i); \ } #if HAVE_TYPEOF #define HTABLE_KTYPE(keyof) typeof(keyof(NULL)) #else #define HTABLE_KTYPE(keyof) void * #endif #endif /* CCAN_HTABLE_TYPE_H */ ntdb-1.0/lib/ccan/htable/test/000077500000000000000000000000001224151530700161715ustar00rootroot00000000000000ntdb-1.0/lib/ccan/htable/test/run-size.c000066400000000000000000000013141224151530700201100ustar00rootroot00000000000000#include #include #include #include #include #define NUM_VALS 
512 /* We use the number divided by two as the hash (for lots of collisions). */ static size_t hash(const void *elem, void *unused) { size_t h = *(uint64_t *)elem / 2; return h; } int main(int argc, char *argv[]) { struct htable ht; uint64_t val[NUM_VALS]; unsigned int i; plan_tests((NUM_VALS) * 2); for (i = 0; i < NUM_VALS; i++) val[i] = i; htable_init(&ht, hash, NULL); for (i = 0; i < NUM_VALS; i++) { ok1(ht.max >= i); ok1(ht.max <= i * 2); htable_add(&ht, hash(&val[i], NULL), &val[i]); } htable_clear(&ht); return exit_status(); } ntdb-1.0/lib/ccan/htable/test/run-type.c000066400000000000000000000075611224151530700201310ustar00rootroot00000000000000#include #include #include #include #include #define NUM_BITS 7 #define NUM_VALS (1 << NUM_BITS) struct obj { /* Makes sure we don't try to treat and obj as a key or vice versa */ unsigned char unused; unsigned int key; }; static const unsigned int *objkey(const struct obj *obj) { return &obj->key; } /* We use the number divided by two as the hash (for lots of collisions), plus set all the higher bits so we can detect if they don't get masked out. 
*/ static size_t objhash(const unsigned int *key) { size_t h = *key / 2; h |= -1UL << NUM_BITS; return h; } static bool cmp(const struct obj *obj, const unsigned int *key) { return obj->key == *key; } HTABLE_DEFINE_TYPE(struct obj, objkey, objhash, cmp, htable_obj); static void add_vals(struct htable_obj *ht, struct obj val[], unsigned int num) { unsigned int i; for (i = 0; i < num; i++) { if (htable_obj_get(ht, &i)) { fail("%u already in hash", i); return; } htable_obj_add(ht, &val[i]); if (htable_obj_get(ht, &i) != &val[i]) { fail("%u not added to hash", i); return; } } pass("Added %u numbers to hash", i); } static void find_vals(const struct htable_obj *ht, const struct obj val[], unsigned int num) { unsigned int i; for (i = 0; i < num; i++) { if (htable_obj_get(ht, &i) != &val[i]) { fail("%u not found in hash", i); return; } } pass("Found %u numbers in hash", i); } static void del_vals(struct htable_obj *ht, const struct obj val[], unsigned int num) { unsigned int i; for (i = 0; i < num; i++) { if (!htable_obj_delkey(ht, &val[i].key)) { fail("%u not deleted from hash", i); return; } } pass("Deleted %u numbers in hash", i); } static void del_vals_bykey(struct htable_obj *ht, const struct obj val[], unsigned int num) { unsigned int i; for (i = 0; i < num; i++) { if (!htable_obj_delkey(ht, &i)) { fail("%u not deleted by key from hash", i); return; } } pass("Deleted %u numbers by key from hash", i); } static bool check_mask(struct htable *ht, const struct obj val[], unsigned num) { uint64_t i; for (i = 0; i < num; i++) { if (((uintptr_t)&val[i] & ht->common_mask) != ht->common_bits) return false; } return true; } int main(int argc, char *argv[]) { unsigned int i; struct htable_obj ht; struct obj val[NUM_VALS]; unsigned int dne; void *p; struct htable_obj_iter iter; plan_tests(20); for (i = 0; i < NUM_VALS; i++) val[i].key = i; dne = i; htable_obj_init(&ht); ok1(ht.raw.max == 0); ok1(ht.raw.bits == 0); /* We cannot find an entry which doesn't exist. 
*/ ok1(!htable_obj_get(&ht, &dne)); /* Fill it, it should increase in size. */ add_vals(&ht, val, NUM_VALS); ok1(ht.raw.bits == NUM_BITS + 1); ok1(ht.raw.max < (1 << ht.raw.bits)); /* Mask should be set. */ ok1(ht.raw.common_mask != 0); ok1(ht.raw.common_mask != -1); ok1(check_mask(&ht.raw, val, NUM_VALS)); /* Find all. */ find_vals(&ht, val, NUM_VALS); ok1(!htable_obj_get(&ht, &dne)); /* Walk once, should get them all. */ i = 0; for (p = htable_obj_first(&ht,&iter); p; p = htable_obj_next(&ht, &iter)) i++; ok1(i == NUM_VALS); /* Delete all. */ del_vals(&ht, val, NUM_VALS); ok1(!htable_obj_get(&ht, &val[0].key)); /* Worst case, a "pointer" which doesn't have any matching bits. */ htable_add(&ht.raw, 0, (void *)~(uintptr_t)&val[NUM_VALS-1]); htable_obj_add(&ht, &val[NUM_VALS-1]); ok1(ht.raw.common_mask == 0); ok1(ht.raw.common_bits == 0); /* Delete the bogus one before we trip over it. */ htable_del(&ht.raw, 0, (void *)~(uintptr_t)&val[NUM_VALS-1]); /* Add the rest. */ add_vals(&ht, val, NUM_VALS-1); /* Check we can find them all. */ find_vals(&ht, val, NUM_VALS); ok1(!htable_obj_get(&ht, &dne)); /* Delete them all by key. */ del_vals_bykey(&ht, val, NUM_VALS); htable_obj_clear(&ht); return exit_status(); } ntdb-1.0/lib/ccan/htable/test/run.c000066400000000000000000000106751224151530700171520ustar00rootroot00000000000000#include #include #include #include #include #define NUM_BITS 7 #define NUM_VALS (1 << NUM_BITS) /* We use the number divided by two as the hash (for lots of collisions), plus set all the higher bits so we can detect if they don't get masked out. 
*/ static size_t hash(const void *elem, void *unused) { size_t h = *(uint64_t *)elem / 2; h |= -1UL << NUM_BITS; return h; } static bool objcmp(const void *htelem, void *cmpdata) { return *(uint64_t *)htelem == *(uint64_t *)cmpdata; } static void add_vals(struct htable *ht, const uint64_t val[], unsigned int off, unsigned int num) { uint64_t i; for (i = off; i < off+num; i++) { if (htable_get(ht, hash(&i, NULL), objcmp, &i)) { fail("%llu already in hash", (long long)i); return; } htable_add(ht, hash(&val[i], NULL), &val[i]); if (htable_get(ht, hash(&i, NULL), objcmp, &i) != &val[i]) { fail("%llu not added to hash", (long long)i); return; } } pass("Added %llu numbers to hash", (long long)i); } #if 0 static void refill_vals(struct htable *ht, const uint64_t val[], unsigned int num) { uint64_t i; for (i = 0; i < num; i++) { if (htable_get(ht, hash(&i, NULL), objcmp, &i)) continue; htable_add(ht, hash(&val[i], NULL), &val[i]); } } #endif static void find_vals(struct htable *ht, const uint64_t val[], unsigned int num) { uint64_t i; for (i = 0; i < num; i++) { if (htable_get(ht, hash(&i, NULL), objcmp, &i) != &val[i]) { fail("%llu not found in hash", (long long)i); return; } } pass("Found %llu numbers in hash", (long long)i); } static void del_vals(struct htable *ht, const uint64_t val[], unsigned int num) { uint64_t i; for (i = 0; i < num; i++) { if (!htable_del(ht, hash(&val[i], NULL), &val[i])) { fail("%llu not deleted from hash", (long long)i); return; } } pass("Deleted %llu numbers in hash", (long long)i); } static bool check_mask(struct htable *ht, uint64_t val[], unsigned num) { uint64_t i; for (i = 0; i < num; i++) { if (((uintptr_t)&val[i] & ht->common_mask) != ht->common_bits) return false; } return true; } int main(int argc, char *argv[]) { unsigned int i; uintptr_t perfect_bit; struct htable ht; uint64_t val[NUM_VALS]; uint64_t dne; void *p; struct htable_iter iter; plan_tests(29); for (i = 0; i < NUM_VALS; i++) val[i] = i; dne = i; htable_init(&ht, hash, 
NULL); ok1(ht.max == 0); ok1(ht.bits == 0); /* We cannot find an entry which doesn't exist. */ ok1(!htable_get(&ht, hash(&dne, NULL), objcmp, &dne)); /* This should increase it once. */ add_vals(&ht, val, 0, 1); ok1(ht.bits == 1); ok1(ht.max == 1); ok1(ht.common_mask == -1); /* Mask should be set. */ ok1(check_mask(&ht, val, 1)); /* This should increase it again. */ add_vals(&ht, val, 1, 1); ok1(ht.bits == 2); ok1(ht.max == 3); /* Mask should be set. */ ok1(ht.common_mask != 0); ok1(ht.common_mask != -1); ok1(check_mask(&ht, val, 2)); /* Now do the rest. */ add_vals(&ht, val, 2, NUM_VALS - 2); /* Find all. */ find_vals(&ht, val, NUM_VALS); ok1(!htable_get(&ht, hash(&dne, NULL), objcmp, &dne)); /* Walk once, should get them all. */ i = 0; for (p = htable_first(&ht,&iter); p; p = htable_next(&ht, &iter)) i++; ok1(i == NUM_VALS); /* Delete all. */ del_vals(&ht, val, NUM_VALS); ok1(!htable_get(&ht, hash(&val[0], NULL), objcmp, &val[0])); /* Worst case, a "pointer" which doesn't have any matching bits. */ htable_add(&ht, 0, (void *)~(uintptr_t)&val[NUM_VALS-1]); htable_add(&ht, hash(&val[NUM_VALS-1], NULL), &val[NUM_VALS-1]); ok1(ht.common_mask == 0); ok1(ht.common_bits == 0); /* Get rid of bogus pointer before we trip over it! */ htable_del(&ht, 0, (void *)~(uintptr_t)&val[NUM_VALS-1]); /* Add the rest. */ add_vals(&ht, val, 0, NUM_VALS-1); /* Check we can find them all. */ find_vals(&ht, val, NUM_VALS); ok1(!htable_get(&ht, hash(&dne, NULL), objcmp, &dne)); /* Corner cases: wipe out the perfect bit using bogus pointer. */ htable_clear(&ht); htable_add(&ht, 0, (void *)((uintptr_t)&val[NUM_VALS-1])); ok1(ht.perfect_bit); perfect_bit = ht.perfect_bit; htable_add(&ht, 0, (void *)((uintptr_t)&val[NUM_VALS-1] | perfect_bit)); ok1(ht.perfect_bit == 0); htable_del(&ht, 0, (void *)((uintptr_t)&val[NUM_VALS-1] | perfect_bit)); /* Enlarging should restore it... 
*/ add_vals(&ht, val, 0, NUM_VALS-1); ok1(ht.perfect_bit != 0); htable_clear(&ht); return exit_status(); } ntdb-1.0/lib/ccan/htable/tools/000077500000000000000000000000001224151530700163525ustar00rootroot00000000000000ntdb-1.0/lib/ccan/htable/tools/Makefile000066400000000000000000000010771224151530700200170ustar00rootroot00000000000000CFLAGS=-Wall -Werror -O3 -I../../.. #CFLAGS=-Wall -Werror -g -I../../.. all: speed stringspeed hsearchspeed speed: speed.o hash.o speed.o: speed.c ../htable.h ../htable.c hash.o: ../../hash/hash.c $(CC) $(CFLAGS) -c -o $@ $< stringspeed: stringspeed.o hash.o ../../talloc.o ../../str_talloc.o ../../grab_file.o ../../str.o ../../time.o ../../noerr.o stringspeed.o: speed.c ../htable.h ../htable.c hsearchspeed: hsearchspeed.o ../../talloc.o ../../str_talloc.o ../../grab_file.o ../../str.o ../../time.o ../../noerr.o clean: rm -f stringspeed speed hsearchspeed *.o ntdb-1.0/lib/ccan/htable/tools/hsearchspeed.c000066400000000000000000000050021224151530700211510ustar00rootroot00000000000000/* Simple speed tests for a hash of strings using hsearch */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Nanoseconds per operation */ static size_t normalize(const struct timeval *start, const struct timeval *stop, unsigned int num) { struct timeval diff; timersub(stop, start, &diff); /* Floating point is more accurate here. */ return (double)(diff.tv_sec * 1000000 + diff.tv_usec) / num * 1000; } int main(int argc, char *argv[]) { size_t i, j, num; struct timeval start, stop; char **w; ENTRY *words, *misswords; w = strsplit(NULL, grab_file(NULL, argv[1] ? argv[1] : "/usr/share/dict/words", NULL), "\n"); num = talloc_array_length(w) - 1; printf("%zu words\n", num); hcreate(num+num/3); words = talloc_array(w, ENTRY, num); for (i = 0; i < num; i++) { words[i].key = w[i]; words[i].data = words[i].key; } /* Append and prepend last char for miss testing. 
*/ misswords = talloc_array(w, ENTRY, num); for (i = 0; i < num; i++) { char lastc; if (strlen(w[i])) lastc = w[i][strlen(w[i])-1]; else lastc = 'z'; misswords[i].key = talloc_asprintf(misswords, "%c%s%c%c", lastc, w[i], lastc, lastc); } printf("#01: Initial insert: "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) hsearch(words[i], ENTER); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#02: Initial lookup (match): "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) if (hsearch(words[i], FIND)->data != words[i].data) abort(); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#03: Initial lookup (miss): "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) { if (hsearch(misswords[i], FIND)) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); /* Lookups in order are very cache-friendly for judy; try random */ printf("#04: Initial lookup (random): "); fflush(stdout); start = time_now(); for (i = 0, j = 0; i < num; i++, j = (j + 10007) % num) if (hsearch(words[i], FIND)->data != words[i].data) abort(); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); return 0; } ntdb-1.0/lib/ccan/htable/tools/speed.c000066400000000000000000000233251224151530700176230ustar00rootroot00000000000000/* Simple speed tests for hashtables. */ #include #include #include #include #include #include #include #include #include static size_t hashcount; struct object { /* The key. */ unsigned int key; /* Some contents. Doubles as consistency check. 
*/ struct object *self; }; static const unsigned int *objkey(const struct object *obj) { return &obj->key; } static size_t hash_obj(const unsigned int *key) { hashcount++; return hashl(key, 1, 0); } static bool cmp(const struct object *object, const unsigned int *key) { return object->key == *key; } HTABLE_DEFINE_TYPE(struct object, objkey, hash_obj, cmp, htable_obj); static unsigned int popcount(unsigned long val) { #if HAVE_BUILTIN_POPCOUNTL return __builtin_popcountl(val); #else if (sizeof(long) == sizeof(u64)) { u64 v = val; v = (v & 0x5555555555555555ULL) + ((v >> 1) & 0x5555555555555555ULL); v = (v & 0x3333333333333333ULL) + ((v >> 1) & 0x3333333333333333ULL); v = (v & 0x0F0F0F0F0F0F0F0FULL) + ((v >> 1) & 0x0F0F0F0F0F0F0F0FULL); v = (v & 0x00FF00FF00FF00FFULL) + ((v >> 1) & 0x00FF00FF00FF00FFULL); v = (v & 0x0000FFFF0000FFFFULL) + ((v >> 1) & 0x0000FFFF0000FFFFULL); v = (v & 0x00000000FFFFFFFFULL) + ((v >> 1) & 0x00000000FFFFFFFFULL); return v; } val = (val & 0x55555555ULL) + ((val >> 1) & 0x55555555ULL); val = (val & 0x33333333ULL) + ((val >> 1) & 0x33333333ULL); val = (val & 0x0F0F0F0FULL) + ((val >> 1) & 0x0F0F0F0FULL); val = (val & 0x00FF00FFULL) + ((val >> 1) & 0x00FF00FFULL); val = (val & 0x0000FFFFULL) + ((val >> 1) & 0x0000FFFFULL); return val; #endif } static size_t perfect(const struct htable *ht) { size_t i, placed_perfect = 0; for (i = 0; i < ((size_t)1 << ht->bits); i++) { if (!entry_is_valid(ht->table[i])) continue; if (hash_bucket(ht, ht->rehash(get_raw_ptr(ht, ht->table[i]), ht->priv)) == i) { assert((ht->table[i] & ht->perfect_bit) == ht->perfect_bit); placed_perfect++; } } return placed_perfect; } static size_t count_deleted(const struct htable *ht) { size_t i, delete_markers = 0; for (i = 0; i < ((size_t)1 << ht->bits); i++) { if (ht->table[i] == HTABLE_DELETED) delete_markers++; } return delete_markers; } /* Nanoseconds per operation */ static size_t normalize(const struct timeval *start, const struct timeval *stop, unsigned int num) { 
struct timeval diff; timersub(stop, start, &diff); /* Floating point is more accurate here. */ return (double)(diff.tv_sec * 1000000 + diff.tv_usec) / num * 1000; } static size_t worst_run(struct htable *ht, size_t *deleted) { size_t longest = 0, len = 0, this_del = 0, i; *deleted = 0; /* This doesn't take into account end-wrap, but gives an idea. */ for (i = 0; i < ((size_t)1 << ht->bits); i++) { if (ht->table[i]) { len++; if (ht->table[i] == HTABLE_DELETED) this_del++; } else { if (len > longest) { longest = len; *deleted = this_del; } len = 0; this_del = 0; } } return longest; } int main(int argc, char *argv[]) { struct object *objs; size_t i, j, num, deleted; struct timeval start, stop; struct htable_obj ht; bool make_dumb = false; if (argv[1] && strcmp(argv[1], "--dumb") == 0) { argv++; make_dumb = true; } num = argv[1] ? atoi(argv[1]) : 1000000; objs = calloc(num, sizeof(objs[0])); for (i = 0; i < num; i++) { objs[i].key = i; objs[i].self = &objs[i]; } htable_obj_init(&ht); printf("Initial insert: "); fflush(stdout); gettimeofday(&start, NULL); for (i = 0; i < num; i++) htable_obj_add(&ht, objs[i].self); gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("Details: hash size %u, mask bits %u, perfect %.0f%%\n", 1U << ht.raw.bits, popcount(ht.raw.common_mask), perfect(&ht.raw) * 100.0 / ht.raw.elems); if (make_dumb) { /* Screw with mask, to hobble us. 
*/ update_common(&ht.raw, (void *)~ht.raw.common_bits); printf("Details: DUMB MODE: mask bits %u\n", popcount(ht.raw.common_mask)); } printf("Initial lookup (match): "); fflush(stdout); gettimeofday(&start, NULL); for (i = 0; i < num; i++) if (htable_obj_get(&ht, &i)->self != objs[i].self) abort(); gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("Initial lookup (miss): "); fflush(stdout); gettimeofday(&start, NULL); for (i = 0; i < num; i++) { unsigned int n = i + num; if (htable_obj_get(&ht, &n)) abort(); } gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); /* Lookups in order are very cache-friendly for judy; try random */ printf("Initial lookup (random): "); fflush(stdout); gettimeofday(&start, NULL); for (i = 0, j = 0; i < num; i++, j = (j + 10007) % num) if (htable_obj_get(&ht, &j)->self != &objs[j]) abort(); gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); hashcount = 0; printf("Initial delete all: "); fflush(stdout); gettimeofday(&start, NULL); for (i = 0; i < num; i++) if (!htable_obj_del(&ht, objs[i].self)) abort(); gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("Details: rehashes %zu\n", hashcount); printf("Initial re-inserting: "); fflush(stdout); gettimeofday(&start, NULL); for (i = 0; i < num; i++) htable_obj_add(&ht, objs[i].self); gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); hashcount = 0; printf("Deleting first half: "); fflush(stdout); gettimeofday(&start, NULL); for (i = 0; i < num; i+=2) if (!htable_obj_del(&ht, objs[i].self)) abort(); gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("Details: rehashes %zu, delete markers %zu\n", hashcount, count_deleted(&ht.raw)); printf("Adding (a different) half: "); fflush(stdout); for (i = 0; i < num; i+=2) objs[i].key = num+i; gettimeofday(&start, NULL); for (i = 0; i < num; i+=2) htable_obj_add(&ht, 
objs[i].self); gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("Details: delete markers %zu, perfect %.0f%%\n", count_deleted(&ht.raw), perfect(&ht.raw) * 100.0 / ht.raw.elems); printf("Lookup after half-change (match): "); fflush(stdout); gettimeofday(&start, NULL); for (i = 1; i < num; i+=2) if (htable_obj_get(&ht, &i)->self != objs[i].self) abort(); for (i = 0; i < num; i+=2) { unsigned int n = i + num; if (htable_obj_get(&ht, &n)->self != objs[i].self) abort(); } gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("Lookup after half-change (miss): "); fflush(stdout); gettimeofday(&start, NULL); for (i = 0; i < num; i++) { unsigned int n = i + num * 2; if (htable_obj_get(&ht, &n)) abort(); } gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); /* Hashtables with delete markers can fill with markers over time. * so do some changes to see how it operates in long-term. */ for (i = 0; i < 5; i++) { if (i == 0) { /* We don't measure this: jmap is different. */ printf("Details: initial churn\n"); } else { printf("Churning %s time: ", i == 1 ? "second" : i == 2 ? "third" : i == 3 ? "fourth" : "fifth"); fflush(stdout); } gettimeofday(&start, NULL); for (j = 0; j < num; j++) { if (!htable_obj_del(&ht, &objs[j])) abort(); objs[j].key = num*i+j; if (!htable_obj_add(&ht, &objs[j])) abort(); } gettimeofday(&stop, NULL); if (i != 0) printf(" %zu ns\n", normalize(&start, &stop, num)); } /* Spread out the keys more to try to make it harder. 
*/ printf("Details: reinserting with spread\n"); for (i = 0; i < num; i++) { if (!htable_obj_del(&ht, objs[i].self)) abort(); objs[i].key = num * 5 + i * 9; if (!htable_obj_add(&ht, objs[i].self)) abort(); } printf("Details: delete markers %zu, perfect %.0f%%\n", count_deleted(&ht.raw), perfect(&ht.raw) * 100.0 / ht.raw.elems); i = worst_run(&ht.raw, &deleted); printf("Details: worst run %zu (%zu deleted)\n", i, deleted); printf("Lookup after churn & spread (match): "); fflush(stdout); gettimeofday(&start, NULL); for (i = 0; i < num; i++) { unsigned int n = num * 5 + i * 9; if (htable_obj_get(&ht, &n)->self != objs[i].self) abort(); } gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("Lookup after churn & spread (miss): "); fflush(stdout); gettimeofday(&start, NULL); for (i = 0; i < num; i++) { unsigned int n = num * (5 + 9) + i * 9; if (htable_obj_get(&ht, &n)) abort(); } gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("Lookup after churn & spread (random): "); fflush(stdout); gettimeofday(&start, NULL); for (i = 0, j = 0; i < num; i++, j = (j + 10007) % num) { unsigned int n = num * 5 + j * 9; if (htable_obj_get(&ht, &n)->self != &objs[j]) abort(); } gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); hashcount = 0; printf("Deleting half after churn & spread: "); fflush(stdout); gettimeofday(&start, NULL); for (i = 0; i < num; i+=2) if (!htable_obj_del(&ht, objs[i].self)) abort(); gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("Adding (a different) half after churn & spread: "); fflush(stdout); for (i = 0; i < num; i+=2) objs[i].key = num*6+i*9; gettimeofday(&start, NULL); for (i = 0; i < num; i+=2) htable_obj_add(&ht, objs[i].self); gettimeofday(&stop, NULL); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("Details: delete markers %zu, perfect %.0f%%\n", count_deleted(&ht.raw), perfect(&ht.raw) * 100.0 / 
ht.raw.elems); return 0; } ntdb-1.0/lib/ccan/htable/tools/stringspeed.c000066400000000000000000000142221224151530700210460ustar00rootroot00000000000000/* Simple speed tests for a hash of strings. */ #include #include #include #include #include #include #include #include #include #include #include #include #include static size_t hashcount; static const char *strkey(const char *str) { return str; } static size_t hash_str(const char *key) { hashcount++; return hash(key, strlen(key), 0); } static bool cmp(const char *obj, const char *key) { return strcmp(obj, key) == 0; } HTABLE_DEFINE_TYPE(char, strkey, hash_str, cmp, htable_str); /* Nanoseconds per operation */ static size_t normalize(const struct timeval *start, const struct timeval *stop, unsigned int num) { struct timeval diff; timersub(stop, start, &diff); /* Floating point is more accurate here. */ return (double)(diff.tv_sec * 1000000 + diff.tv_usec) / num * 1000; } int main(int argc, char *argv[]) { size_t i, j, num; struct timeval start, stop; struct htable_str ht; char **words, **misswords; words = strsplit(NULL, grab_file(NULL, argv[1] ? argv[1] : "/usr/share/dict/words", NULL), "\n"); htable_str_init(&ht); num = talloc_array_length(words) - 1; /* Note that on my system, num is just > 98304, where we double! */ printf("%zu words\n", num); /* Append and prepend last char for miss testing. 
*/ misswords = talloc_array(words, char *, num); for (i = 0; i < num; i++) { char lastc; if (strlen(words[i])) lastc = words[i][strlen(words[i])-1]; else lastc = 'z'; misswords[i] = talloc_asprintf(misswords, "%c%s%c%c", lastc, words[i], lastc, lastc); } printf("#01: Initial insert: "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) htable_str_add(&ht, words[i]); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("Bytes allocated: %zu\n", sizeof(ht.raw.table[0]) << ht.raw.bits); printf("#02: Initial lookup (match): "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) if (htable_str_get(&ht, words[i]) != words[i]) abort(); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#03: Initial lookup (miss): "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) { if (htable_str_get(&ht, misswords[i])) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); /* Lookups in order are very cache-friendly for judy; try random */ printf("#04: Initial lookup (random): "); fflush(stdout); start = time_now(); for (i = 0, j = 0; i < num; i++, j = (j + 10007) % num) if (htable_str_get(&ht, words[j]) != words[j]) abort(); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); hashcount = 0; printf("#05: Initial delete all: "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) if (!htable_str_del(&ht, words[i])) abort(); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#06: Initial re-inserting: "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) htable_str_add(&ht, words[i]); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); hashcount = 0; printf("#07: Deleting first half: "); fflush(stdout); start = time_now(); for (i = 0; i < num; i+=2) if (!htable_str_del(&ht, words[i])) abort(); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#08: Adding (a 
different) half: "); fflush(stdout); start = time_now(); for (i = 0; i < num; i+=2) htable_str_add(&ht, misswords[i]); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#09: Lookup after half-change (match): "); fflush(stdout); start = time_now(); for (i = 1; i < num; i+=2) if (htable_str_get(&ht, words[i]) != words[i]) abort(); for (i = 0; i < num; i+=2) { if (htable_str_get(&ht, misswords[i]) != misswords[i]) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#10: Lookup after half-change (miss): "); fflush(stdout); start = time_now(); for (i = 0; i < num; i+=2) if (htable_str_get(&ht, words[i])) abort(); for (i = 1; i < num; i+=2) { if (htable_str_get(&ht, misswords[i])) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); /* Hashtables with delete markers can fill with markers over time. * so do some changes to see how it operates in long-term. */ printf("#11: Churn 1: "); start = time_now(); for (j = 0; j < num; j+=2) { if (!htable_str_del(&ht, misswords[j])) abort(); if (!htable_str_add(&ht, words[j])) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#12: Churn 2: "); start = time_now(); for (j = 1; j < num; j+=2) { if (!htable_str_del(&ht, words[j])) abort(); if (!htable_str_add(&ht, misswords[j])) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#13: Churn 3: "); start = time_now(); for (j = 1; j < num; j+=2) { if (!htable_str_del(&ht, misswords[j])) abort(); if (!htable_str_add(&ht, words[j])) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); /* Now it's back to normal... 
*/ printf("#14: Post-Churn lookup (match): "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) if (htable_str_get(&ht, words[i]) != words[i]) abort(); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#15: Post-Churn lookup (miss): "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) { if (htable_str_get(&ht, misswords[i])) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); /* Lookups in order are very cache-friendly for judy; try random */ printf("#16: Post-Churn lookup (random): "); fflush(stdout); start = time_now(); for (i = 0, j = 0; i < num; i++, j = (j + 10007) % num) if (htable_str_get(&ht, words[j]) != words[j]) abort(); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); return 0; } ntdb-1.0/lib/ccan/ilog/000077500000000000000000000000001224151530700147055ustar00rootroot00000000000000ntdb-1.0/lib/ccan/ilog/LICENSE000066400000000000000000000636351224151530700157270ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. 
You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. 
Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. 
In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. 
A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. 
b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. 
In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". 
Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. 
You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. 
e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. 
However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. 
If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. 
If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! ntdb-1.0/lib/ccan/ilog/_info000066400000000000000000000026611224151530700157270ustar00rootroot00000000000000/** * ilog - Integer logarithm. 
* * ilog_32() and ilog_64() compute the minimum number of bits required to store * an unsigned 32-bit or 64-bit value without any leading zero bits. * This can also be thought of as the location of the highest set bit, with * counting starting from one (so that 0 returns 0, 1 returns 1, and 2**31 * returns 32). * When the value is known to be non-zero ilog32_nz() and ilog64_nz() can * compile into as few as two instructions, one of which may get optimized out * later. * STATIC_ILOG_32 and STATIC_ILOG_64 allow computation on compile-time * constants, so other compile-time constants can be derived from them. * * Example: * #include * #include * #include * * int main(void){ * int i; * printf("ilog32(0x%08X)=%i\n",0,ilog32(0)); * for(i=1;i<=STATIC_ILOG_32(USHRT_MAX);i++){ * uint32_t v; * v=(uint32_t)1U<<(i-1); * //Here we know v is non-zero, so we can use ilog32_nz(). * printf("ilog32(0x%08X)=%i\n",v,ilog32_nz(v)); * } * return 0; * } * * License: LGPL (v2.1 or any later version) * Author: Timothy B. Terriberry */ #include #include #include "config.h" int main(int _argc,const char *_argv[]){ /*Expect exactly one argument.*/ if(_argc!=2)return 1; if(strcmp(_argv[1],"depends")==0){ printf("ccan/compiler\n"); return 0; } return 1; } ntdb-1.0/lib/ccan/ilog/ilog.c000066400000000000000000000061251224151530700160070ustar00rootroot00000000000000/*(C) Timothy B. Terriberry (tterribe@xiph.org) 2001-2009 LGPL (v2 or later). * See LICENSE file for details. */ #include "ilog.h" #include /*The fastest fallback strategy for platforms with fast multiplication appears to be based on de Bruijn sequences~\cite{LP98}. Tests confirmed this to be true even on an ARM11, where it is actually faster than using the native clz instruction. Define ILOG_NODEBRUIJN to use a simpler fallback on platforms where multiplication or table lookups are too expensive. @UNPUBLISHED{LP98, author="Charles E. 
Leiserson and Harald Prokop", title="Using de {Bruijn} Sequences to Index a 1 in a Computer Word", month=Jun, year=1998, note="\url{http://supertech.csail.mit.edu/papers/debruijn.pdf}" }*/ static UNNEEDED const unsigned char DEBRUIJN_IDX32[32]={ 0, 1,28, 2,29,14,24, 3,30,22,20,15,25,17, 4, 8, 31,27,13,23,21,19,16, 7,26,12,18, 6,11, 5,10, 9 }; /* We always compile these in, in case someone takes address of function. */ #undef ilog32_nz #undef ilog32 #undef ilog64_nz #undef ilog64 int ilog32(uint32_t _v){ /*On a Pentium M, this branchless version tested as the fastest version without multiplications on 1,000,000,000 random 32-bit integers, edging out a similar version with branches, and a 256-entry LUT version.*/ # if defined(ILOG_NODEBRUIJN) int ret; int m; ret=_v>0; m=(_v>0xFFFFU)<<4; _v>>=m; ret|=m; m=(_v>0xFFU)<<3; _v>>=m; ret|=m; m=(_v>0xFU)<<2; _v>>=m; ret|=m; m=(_v>3)<<1; _v>>=m; ret|=m; ret+=_v>1; return ret; /*This de Bruijn sequence version is faster if you have a fast multiplier.*/ # else int ret; ret=_v>0; _v|=_v>>1; _v|=_v>>2; _v|=_v>>4; _v|=_v>>8; _v|=_v>>16; _v=(_v>>1)+1; ret+=DEBRUIJN_IDX32[_v*0x77CB531U>>27&0x1F]; return ret; # endif } int ilog32_nz(uint32_t _v) { return ilog32(_v); } int ilog64(uint64_t _v){ # if defined(ILOG_NODEBRUIJN) uint32_t v; int ret; int m; ret=_v>0; m=(_v>0xFFFFFFFFU)<<5; v=(uint32_t)(_v>>m); ret|=m; m=(v>0xFFFFU)<<4; v>>=m; ret|=m; m=(v>0xFFU)<<3; v>>=m; ret|=m; m=(v>0xFU)<<2; v>>=m; ret|=m; m=(v>3)<<1; v>>=m; ret|=m; ret+=v>1; return ret; # else /*If we don't have a 64-bit word, split it into two 32-bit halves.*/ # if LONG_MAX<9223372036854775807LL uint32_t v; int ret; int m; ret=_v>0; m=(_v>0xFFFFFFFFU)<<5; v=(uint32_t)(_v>>m); ret|=m; v|=v>>1; v|=v>>2; v|=v>>4; v|=v>>8; v|=v>>16; v=(v>>1)+1; ret+=DEBRUIJN_IDX32[v*0x77CB531U>>27&0x1F]; return ret; /*Otherwise do it in one 64-bit operation.*/ # else static const unsigned char DEBRUIJN_IDX64[64]={ 0, 1, 2, 7, 3,13, 8,19, 4,25,14,28, 9,34,20,40, 
5,17,26,38,15,46,29,48,10,31,35,54,21,50,41,57, 63, 6,12,18,24,27,33,39,16,37,45,47,30,53,49,56, 62,11,23,32,36,44,52,55,61,22,43,51,60,42,59,58 }; int ret; ret=_v>0; _v|=_v>>1; _v|=_v>>2; _v|=_v>>4; _v|=_v>>8; _v|=_v>>16; _v|=_v>>32; _v=(_v>>1)+1; ret+=DEBRUIJN_IDX64[_v*0x218A392CD3D5DBF>>58&0x3F]; return ret; # endif # endif } int ilog64_nz(uint64_t _v) { return ilog64(_v); } ntdb-1.0/lib/ccan/ilog/ilog.h000066400000000000000000000124651224151530700160200ustar00rootroot00000000000000/* Licensed under LGPLv2.1+ - see LICENSE file for details */ #if !defined(_ilog_H) # define _ilog_H (1) # include "config.h" # include # include # include /** * ilog32 - Integer binary logarithm of a 32-bit value. * @_v: A 32-bit value. * Returns floor(log2(_v))+1, or 0 if _v==0. * This is the number of bits that would be required to represent _v in two's * complement notation with all of the leading zeros stripped. * Note that many uses will resolve to the fast macro version instead. * * See Also: * ilog32_nz(), ilog64() * * Example: * // Rounds up to next power of 2 (if not a power of 2). * static uint32_t round_up32(uint32_t i) * { * assert(i != 0); * return 1U << ilog32(i-1); * } */ int ilog32(uint32_t _v) CONST_FUNCTION; /** * ilog32_nz - Integer binary logarithm of a non-zero 32-bit value. * @_v: A 32-bit value. * Returns floor(log2(_v))+1, or undefined if _v==0. * This is the number of bits that would be required to represent _v in two's * complement notation with all of the leading zeros stripped. * Note that many uses will resolve to the fast macro version instead. * See Also: * ilog32(), ilog64_nz() * Example: * // Find Last Set (ie. highest bit set, 0 to 31). * static uint32_t fls32(uint32_t i) * { * assert(i != 0); * return ilog32_nz(i) - 1; * } */ int ilog32_nz(uint32_t _v) CONST_FUNCTION; /** * ilog64 - Integer binary logarithm of a 64-bit value. * @_v: A 64-bit value. * Returns floor(log2(_v))+1, or 0 if _v==0. 
* This is the number of bits that would be required to represent _v in two's * complement notation with all of the leading zeros stripped. * Note that many uses will resolve to the fast macro version instead. * See Also: * ilog64_nz(), ilog32() */ int ilog64(uint64_t _v) CONST_FUNCTION; /** * ilog64_nz - Integer binary logarithm of a non-zero 64-bit value. * @_v: A 64-bit value. * Returns floor(log2(_v))+1, or undefined if _v==0. * This is the number of bits that would be required to represent _v in two's * complement notation with all of the leading zeros stripped. * Note that many uses will resolve to the fast macro version instead. * See Also: * ilog64(), ilog32_nz() */ int ilog64_nz(uint64_t _v) CONST_FUNCTION; /** * STATIC_ILOG_32 - The integer logarithm of an (unsigned, 32-bit) constant. * @_v: A non-negative 32-bit constant. * Returns floor(log2(_v))+1, or 0 if _v==0. * This is the number of bits that would be required to represent _v in two's * complement notation with all of the leading zeros stripped. * This macro should only be used when you need a compile-time constant, * otherwise ilog32 or ilog32_nz are just as fast and more flexible. * * Example: * #define MY_PAGE_SIZE 4096 * #define MY_PAGE_BITS (STATIC_ILOG_32(PAGE_SIZE) - 1) */ #define STATIC_ILOG_32(_v) (STATIC_ILOG5((uint32_t)(_v))) /** * STATIC_ILOG_64 - The integer logarithm of an (unsigned, 64-bit) constant. * @_v: A non-negative 64-bit constant. * Returns floor(log2(_v))+1, or 0 if _v==0. * This is the number of bits that would be required to represent _v in two's * complement notation with all of the leading zeros stripped. * This macro should only be used when you need a compile-time constant, * otherwise ilog64 or ilog64_nz are just as fast and more flexible. 
*/ #define STATIC_ILOG_64(_v) (STATIC_ILOG6((uint64_t)(_v))) /* Private implementation details */ /*Note the casts to (int) below: this prevents "upgrading" the type of an entire expression to an (unsigned) size_t.*/ #if INT_MAX>=2147483647 && HAVE_BUILTIN_CLZ #define builtin_ilog32_nz(v) \ (((int)sizeof(unsigned)*CHAR_BIT) - __builtin_clz(v)) #elif LONG_MAX>=2147483647L && HAVE_BUILTIN_CLZL #define builtin_ilog32_nz(v) \ (((int)sizeof(unsigned)*CHAR_BIT) - __builtin_clzl(v)) #endif #if INT_MAX>=9223372036854775807LL && HAVE_BUILTIN_CLZ #define builtin_ilog64_nz(v) \ (((int)sizeof(unsigned)*CHAR_BIT) - __builtin_clz(v)) #elif LONG_MAX>=9223372036854775807LL && HAVE_BUILTIN_CLZL #define builtin_ilog64_nz(v) \ (((int)sizeof(unsigned long)*CHAR_BIT) - __builtin_clzl(v)) #elif HAVE_BUILTIN_CLZLL #define builtin_ilog64_nz(v) \ (((int)sizeof(unsigned long long)*CHAR_BIT) - __builtin_clzll(v)) #endif #ifdef builtin_ilog32_nz #define ilog32(_v) (builtin_ilog32_nz(_v)&-!!(_v)) #define ilog32_nz(_v) builtin_ilog32_nz(_v) #else #define ilog32_nz(_v) ilog32(_v) #define ilog32(_v) (IS_COMPILE_CONSTANT(_v) ? STATIC_ILOG_32(_v) : ilog32(_v)) #endif /* builtin_ilog32_nz */ #ifdef builtin_ilog64_nz #define ilog64(_v) (builtin_ilog64_nz(_v)&-!!(_v)) #define ilog64_nz(_v) builtin_ilog64_nz(_v) #else #define ilog64_nz(_v) ilog64(_v) #define ilog64(_v) (IS_COMPILE_CONSTANT(_v) ? STATIC_ILOG_64(_v) : ilog64(_v)) #endif /* builtin_ilog64_nz */ /* Macros for evaluating compile-time constant ilog. 
*/ # define STATIC_ILOG0(_v) (!!(_v)) # define STATIC_ILOG1(_v) (((_v)&0x2)?2:STATIC_ILOG0(_v)) # define STATIC_ILOG2(_v) (((_v)&0xC)?2+STATIC_ILOG1((_v)>>2):STATIC_ILOG1(_v)) # define STATIC_ILOG3(_v) \ (((_v)&0xF0)?4+STATIC_ILOG2((_v)>>4):STATIC_ILOG2(_v)) # define STATIC_ILOG4(_v) \ (((_v)&0xFF00)?8+STATIC_ILOG3((_v)>>8):STATIC_ILOG3(_v)) # define STATIC_ILOG5(_v) \ (((_v)&0xFFFF0000)?16+STATIC_ILOG4((_v)>>16):STATIC_ILOG4(_v)) # define STATIC_ILOG6(_v) \ (((_v)&0xFFFFFFFF00000000ULL)?32+STATIC_ILOG5((_v)>>32):STATIC_ILOG5(_v)) #endif /* _ilog_H */ ntdb-1.0/lib/ccan/ilog/test/000077500000000000000000000000001224151530700156645ustar00rootroot00000000000000ntdb-1.0/lib/ccan/ilog/test/run-out-of-line.c000066400000000000000000000031411224151530700207670ustar00rootroot00000000000000#include #include #include #include /*Dead simple (but slow) versions to compare against.*/ static int test_ilog32(uint32_t _v){ int ret; for(ret=0;_v;ret++)_v>>=1; return ret; } static int test_ilog64(uint64_t _v){ int ret; for(ret=0;_v;ret++)_v>>=1; return ret; } #define NTRIALS (64) int main(int _argc,const char *_argv[]){ int i; int j; int (*il32)(uint32_t) = ilog32; int (*il64)(uint64_t) = ilog64; int (*il32_nz)(uint32_t) = ilog32_nz; int (*il64_nz)(uint64_t) = ilog64_nz; /*This is how many tests you plan to run.*/ plan_tests(33 * NTRIALS * 3 + 65 * NTRIALS * 3); for(i=0;i<=32;i++){ uint32_t v; /*Test each bit in turn (and 0).*/ v=i?(uint32_t)1U<<(i-1):0; for(j=0;j>((33-i)>>1)>>((32-i)>>1); } } for(i=0;i<=64;i++){ uint64_t v; /*Test each bit in turn (and 0).*/ v=i?(uint64_t)1U<<(i-1):0; for(j=0;j>((65-i)>>1)>>((64-i)>>1)); } } return exit_status(); } ntdb-1.0/lib/ccan/ilog/test/run.c000066400000000000000000000027241224151530700166410ustar00rootroot00000000000000#include #include #include #include /*Dead simple (but slow) versions to compare against.*/ static int test_ilog32(uint32_t _v){ int ret; for(ret=0;_v;ret++)_v>>=1; return ret; } static int test_ilog64(uint64_t _v){ int ret; 
for(ret=0;_v;ret++)_v>>=1; return ret; } #define NTRIALS (64) int main(int _argc,const char *_argv[]){ int i; int j; /*This is how many tests you plan to run.*/ plan_tests(33 * NTRIALS * 3 + 65 * NTRIALS * 3); for(i=0;i<=32;i++){ uint32_t v; /*Test each bit in turn (and 0).*/ v=i?(uint32_t)1U<<(i-1):0; for(j=0;j>((33-i)>>1)>>((32-i)>>1); } } for(i=0;i<=64;i++){ uint64_t v; /*Test each bit in turn (and 0).*/ v=i?(uint64_t)1U<<(i-1):0; for(j=0;j>((65-i)>>1)>>((64-i)>>1)); } } return exit_status(); } ntdb-1.0/lib/ccan/likely/000077500000000000000000000000001224151530700152445ustar00rootroot00000000000000ntdb-1.0/lib/ccan/likely/LICENSE000066400000000000000000000636351224151530700162660ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. 
Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. 
For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. 
For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. 
d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. 
However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. 
If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. 
For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. 
You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. 
It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. 
If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. 
You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! ntdb-1.0/lib/ccan/likely/_info000066400000000000000000000020131224151530700162550ustar00rootroot00000000000000#include #include #include "config.h" /** * likely - macros for annotating likely/unlikely branches in the code * * Inspired by Andi Kleen's macros for the Linux Kernel, these macros * help you annotate rare paths in your code for the convenience of the * compiler and the reader. 
* * License: LGPL (v2.1 or any later version) * Author: Rusty Russell * * Example: * #include * #include * * int main(int argc, char *argv[]) * { * // This example is silly: the compiler knows exit() is unlikely. * if (unlikely(argc == 1)) { * fprintf(stderr, "Usage: %s ...\n", argv[0]); * return 1; * } * for (argc++; argv[argc]; argc++) * printf("%s\n", argv[argc]); * return 0; * } */ int main(int argc, char *argv[]) { /* Expect exactly one argument */ if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/str\n"); printf("ccan/htable\n"); printf("ccan/hash\n"); return 0; } return 1; } ntdb-1.0/lib/ccan/likely/likely.c000066400000000000000000000057721224151530700167140ustar00rootroot00000000000000/* Licensed under LGPLv2.1+ - see LICENSE file for details */ #ifdef CCAN_LIKELY_DEBUG #include #include #include #include #include struct trace { const char *condstr; const char *file; unsigned int line; bool expect; unsigned long count, right; }; static size_t hash_trace(const struct trace *trace) { return hash(trace->condstr, strlen(trace->condstr), hash(trace->file, strlen(trace->file), trace->line + trace->expect)); } static bool trace_eq(const struct trace *t1, const struct trace *t2) { return t1->condstr == t2->condstr && t1->file == t2->file && t1->line == t2->line && t1->expect == t2->expect; } /* struct thash */ HTABLE_DEFINE_TYPE(struct trace, (const struct trace *), hash_trace, trace_eq, thash); static struct thash htable = { HTABLE_INITIALIZER(htable.raw, thash_hash, NULL) }; static void init_trace(struct trace *trace, const char *condstr, const char *file, unsigned int line, bool expect) { trace->condstr = condstr; trace->file = file; trace->line = line; trace->expect = expect; trace->count = trace->right = 0; } static struct trace *add_trace(const struct trace *t) { struct trace *trace = malloc(sizeof(*trace)); *trace = *t; thash_add(&htable, trace); return trace; } long _likely_trace(bool cond, bool expect, const char *condstr, const 
char *file, unsigned int line) { struct trace *p, trace; init_trace(&trace, condstr, file, line, expect); p = thash_get(&htable, &trace); if (!p) p = add_trace(&trace); p->count++; if (cond == expect) p->right++; return cond; } static double right_ratio(const struct trace *t) { return (double)t->right / t->count; } char *likely_stats(unsigned int min_hits, unsigned int percent) { struct trace *worst; double worst_ratio; struct thash_iter i; char *ret; struct trace *t; worst = NULL; worst_ratio = 2; /* This is O(n), but it's not likely called that often. */ for (t = thash_first(&htable, &i); t; t = thash_next(&htable, &i)) { if (t->count >= min_hits) { if (right_ratio(t) < worst_ratio) { worst = t; worst_ratio = right_ratio(t); } } } if (worst_ratio * 100 > percent) return NULL; ret = malloc(strlen(worst->condstr) + strlen(worst->file) + sizeof(long int) * 8 + sizeof("%s:%u:%slikely(%s) correct %u%% (%lu/%lu)")); sprintf(ret, "%s:%u:%slikely(%s) correct %u%% (%lu/%lu)", worst->file, worst->line, worst->expect ? "" : "un", worst->condstr, (unsigned)(worst_ratio * 100), worst->right, worst->count); thash_del(&htable, worst); free(worst); return ret; } void likely_stats_reset(void) { struct thash_iter i; struct trace *t; /* This is a bit better than O(n^2), but we have to loop since * first/next during delete is unreliable. */ while ((t = thash_first(&htable, &i)) != NULL) { for (; t; t = thash_next(&htable, &i)) { thash_del(&htable, t); free(t); } } thash_clear(&htable); } #endif /*CCAN_LIKELY_DEBUG*/ ntdb-1.0/lib/ccan/likely/likely.h000066400000000000000000000065531224151530700167170ustar00rootroot00000000000000/* Licensed under LGPLv2.1+ - see LICENSE file for details */ #ifndef CCAN_LIKELY_H #define CCAN_LIKELY_H #include "config.h" #include #include #ifndef CCAN_LIKELY_DEBUG #if HAVE_BUILTIN_EXPECT /** * likely - indicate that a condition is likely to be true. 
* @cond: the condition * * This uses a compiler extension where available to indicate a likely * code path and optimize appropriately; it's also useful for readers * to quickly identify exceptional paths through functions. The * threshold for "likely" is usually considered to be between 90 and * 99%; marginal cases should not be marked either way. * * See Also: * unlikely(), likely_stats() * * Example: * // Returns false if we overflow. * static inline bool inc_int(unsigned int *val) * { * (*val)++; * if (likely(*val)) * return true; * return false; * } */ #ifndef likely #define likely(cond) __builtin_expect(!!(cond), 1) #endif /** * unlikely - indicate that a condition is unlikely to be true. * @cond: the condition * * This uses a compiler extension where available to indicate an unlikely * code path and optimize appropriately; see likely() above. * * See Also: * likely(), likely_stats(), COLD (compiler.h) * * Example: * // Prints a warning if we overflow. * static inline void inc_int(unsigned int *val) * { * (*val)++; * if (unlikely(*val == 0)) * fprintf(stderr, "Overflow!"); * } */ #ifndef unlikely #define unlikely(cond) __builtin_expect(!!(cond), 0) #endif #else #ifndef likely #define likely(cond) (!!(cond)) #endif #ifndef unlikely #define unlikely(cond) (!!(cond)) #endif #endif #else /* CCAN_LIKELY_DEBUG versions */ #define likely(cond) \ (_likely_trace(!!(cond), 1, stringify(cond), __FILE__, __LINE__)) #define unlikely(cond) \ (_likely_trace(!!(cond), 0, stringify(cond), __FILE__, __LINE__)) long _likely_trace(bool cond, bool expect, const char *condstr, const char *file, unsigned int line); #endif #ifdef CCAN_LIKELY_DEBUG /** * likely_stats - return description of abused likely()/unlikely() * @min_hits: minimum number of hits * @percent: maximum percentage correct * * When CCAN_LIKELY_DEBUG is defined, likely() and unlikely() trace their * results: this causes a significant slowdown, but allows analysis of * whether the branches are labelled correctly. 
* * This function returns a malloc'ed description of the least-correct * usage of likely() or unlikely(). It ignores places which have been * called less than @min_hits times, and those which were predicted * correctly more than @percent of the time. It returns NULL when * nothing meets those criteria. * * Note that this call is destructive; the returned offender is * removed from the trace so that the next call to likely_stats() will * return the next-worst likely()/unlikely() usage. * * Example: * // Print every place hit more than twice which was wrong > 5%. * static void report_stats(void) * { * #ifdef CCAN_LIKELY_DEBUG * const char *bad; * * while ((bad = likely_stats(2, 95)) != NULL) { * printf("Suspicious likely: %s", bad); * free(bad); * } * #endif * } */ char *likely_stats(unsigned int min_hits, unsigned int percent); /** * likely_stats_reset - free up memory of likely()/unlikely() branches. * * This can also plug memory leaks. */ void likely_stats_reset(void); #endif /* CCAN_LIKELY_DEBUG */ #endif /* CCAN_LIKELY_H */ ntdb-1.0/lib/ccan/likely/test/000077500000000000000000000000001224151530700162235ustar00rootroot00000000000000ntdb-1.0/lib/ccan/likely/test/run-debug.c000066400000000000000000000044451224151530700202660ustar00rootroot00000000000000#define CCAN_LIKELY_DEBUG 1 #include #include #include #include static bool one_seems_likely(unsigned int val) { if (likely(val == 1)) return true; return false; } static bool one_seems_unlikely(unsigned int val) { if (unlikely(val == 1)) return true; return false; } static bool likely_one_unlikely_two(unsigned int val1, unsigned int val2) { /* Same line, check we don't get confused! */ if (likely(val1 == 1) && unlikely(val2 == 2)) return true; return false; } int main(int argc, char *argv[]) { char *bad; plan_tests(14); /* Correct guesses. */ one_seems_likely(1); ok1(likely_stats(0, 90) == NULL); one_seems_unlikely(2); ok1(likely_stats(0, 90) == NULL); /* Incorrect guesses. 
*/ one_seems_likely(0); one_seems_likely(2); /* Hasn't been hit 4 times, so this fails */ ok1(!likely_stats(4, 90)); bad = likely_stats(3, 90); ok(strends(bad, "run-debug.c:9:likely(val == 1) correct 33% (1/3)"), "likely_stats returned %s", bad); free(bad); /* Nothing else above 90% */ ok1(!likely_stats(0, 90)); /* This should get everything. */ bad = likely_stats(0, 100); ok(strends(bad, "run-debug.c:16:unlikely(val == 1) correct 100% (1/1)"), "likely_stats returned %s", bad); free(bad); /* Nothing left (table is actually cleared) */ ok1(!likely_stats(0, 100)); /* Make sure unlikely works */ one_seems_unlikely(0); one_seems_unlikely(2); one_seems_unlikely(1); bad = likely_stats(0, 90); ok(strends(bad, "run-debug.c:16:unlikely(val == 1) correct 66% (2/3)"), "likely_stats returned %s", bad); free(bad); ok1(!likely_stats(0, 100)); likely_one_unlikely_two(1, 1); likely_one_unlikely_two(1, 1); likely_one_unlikely_two(1, 1); ok1(!likely_stats(0, 90)); likely_one_unlikely_two(1, 2); bad = likely_stats(0, 90); ok(strends(bad, "run-debug.c:24:unlikely(val2 == 2) correct 75% (3/4)"), "likely_stats returned %s", bad); free(bad); bad = likely_stats(0, 100); ok(strends(bad, "run-debug.c:24:likely(val1 == 1) correct 100% (4/4)"), "likely_stats returned %s", bad); free(bad); ok1(!likely_stats(0, 100)); /* Check that reset works! */ one_seems_unlikely(0); one_seems_unlikely(2); one_seems_unlikely(1); likely_stats_reset(); ok1(!likely_stats(0, 100)); exit(exit_status()); } ntdb-1.0/lib/ccan/likely/test/run.c000066400000000000000000000011151224151530700171710ustar00rootroot00000000000000#include #include #include #include static bool one_seems_likely(unsigned int val) { if (likely(val == 1)) return true; return false; } static bool one_seems_unlikely(unsigned int val) { if (unlikely(val == 1)) return true; return false; } int main(int argc, char *argv[]) { plan_tests(4); /* Without debug, we can only check that it doesn't effect functions. 
*/ ok1(one_seems_likely(1)); ok1(!one_seems_likely(2)); ok1(one_seems_unlikely(1)); ok1(!one_seems_unlikely(2)); exit(exit_status()); } ntdb-1.0/lib/ccan/list/000077500000000000000000000000001224151530700147265ustar00rootroot00000000000000ntdb-1.0/lib/ccan/list/.namespacize000066400000000000000000000007171224151530700172330ustar00rootroot00000000000000list_node_from_off_ list_node_to_off_ list_tail_ list_top_ list_del_from list_del list_empty list_add_tail list_add list_head_init list_check_node list_check list_head list_node list_off_var_ list_off_ list_del_from_off list_del_off list_add_off list_tail_off list_head_off list_entry_off list_for_each_safe_off list_for_each_off list_for_each_safe list_for_each_rev list_for_each list_tail list_top list_entry LIST_HEAD LIST_HEAD_INIT list_debug_node list_debug ntdb-1.0/lib/ccan/list/LICENSE000066400000000000000000000636351224151530700157500ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. 
You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. 
Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. 
In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. 
A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. 
b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. 
In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". 
Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. 
You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. 
e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. 
However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. 
If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. 
If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! 
ntdb-1.0/lib/ccan/list/_info000066400000000000000000000027321224151530700157470ustar00rootroot00000000000000#include #include #include "config.h" /** * list - double linked list routines * * The list header contains routines for manipulating double linked lists. * It defines two types: struct list_head used for anchoring lists, and * struct list_node which is usually embedded in the structure which is placed * in the list. * * Example: * #include * #include * #include * #include * * struct parent { * const char *name; * struct list_head children; * unsigned int num_children; * }; * * struct child { * const char *name; * struct list_node list; * }; * * int main(int argc, char *argv[]) * { * struct parent p; * struct child *c; * unsigned int i; * * if (argc < 2) * errx(1, "Usage: %s parent children...", argv[0]); * * p.name = argv[1]; * list_head_init(&p.children); * p.num_children = 0; * for (i = 2; i < argc; i++) { * c = malloc(sizeof(*c)); * c->name = argv[i]; * list_add(&p.children, &c->list); * p.num_children++; * } * * printf("%s has %u children:", p.name, p.num_children); * list_for_each(&p.children, c, list) * printf("%s ", c->name); * printf("\n"); * return 0; * } * * License: LGPL (v2.1 or any later version) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/container_of\n"); return 0; } return 1; } ntdb-1.0/lib/ccan/list/list.c000066400000000000000000000020461224151530700160470ustar00rootroot00000000000000/* Licensed under LGPLv2.1+ - see LICENSE file for details */ #include #include #include "list.h" static void *corrupt(const char *abortstr, const struct ccan_list_node *head, const struct ccan_list_node *node, unsigned int count) { if (abortstr) { fprintf(stderr, "%s: prev corrupt in node %p (%u) of %p\n", abortstr, node, count, head); abort(); } return NULL; } struct ccan_list_node *ccan_list_check_node(const struct ccan_list_node *node, const char *abortstr) { const 
struct ccan_list_node *p, *n; int count = 0; for (p = node, n = node->next; n != node; p = n, n = n->next) { count++; if (n->prev != p) return corrupt(abortstr, node, n, count); } /* Check prev on head node. */ if (node->prev != p) return corrupt(abortstr, node, node, 0); return (struct ccan_list_node *)node; } struct ccan_list_head *ccan_list_check(const struct ccan_list_head *h, const char *abortstr) { if (!ccan_list_check_node(&h->n, abortstr)) return NULL; return (struct ccan_list_head *)h; } ntdb-1.0/lib/ccan/list/list.d000066400000000000000000000026351224151530700160540ustar00rootroot00000000000000ccan/list/list.o: ccan/list/list.c /usr/include/stdio.h \ /usr/include/features.h /usr/include/i386-linux-gnu/bits/predefs.h \ /usr/include/i386-linux-gnu/sys/cdefs.h \ /usr/include/i386-linux-gnu/bits/wordsize.h \ /usr/include/i386-linux-gnu/gnu/stubs.h \ /usr/include/i386-linux-gnu/gnu/stubs-32.h \ /usr/lib/gcc/i686-linux-gnu/4.5.4/include/stddef.h \ /usr/include/i386-linux-gnu/bits/types.h \ /usr/include/i386-linux-gnu/bits/typesizes.h /usr/include/libio.h \ /usr/include/_G_config.h /usr/include/wchar.h \ /usr/lib/gcc/i686-linux-gnu/4.5.4/include/stdarg.h \ /usr/include/i386-linux-gnu/bits/stdio_lim.h \ /usr/include/i386-linux-gnu/bits/sys_errlist.h /usr/include/stdlib.h \ /usr/include/i386-linux-gnu/bits/waitflags.h \ /usr/include/i386-linux-gnu/bits/waitstatus.h /usr/include/endian.h \ /usr/include/i386-linux-gnu/bits/endian.h \ /usr/include/i386-linux-gnu/bits/byteswap.h \ /usr/include/i386-linux-gnu/sys/types.h /usr/include/time.h \ /usr/include/i386-linux-gnu/sys/select.h \ /usr/include/i386-linux-gnu/bits/select.h \ /usr/include/i386-linux-gnu/bits/sigset.h \ /usr/include/i386-linux-gnu/bits/time.h \ /usr/include/i386-linux-gnu/sys/sysmacros.h \ /usr/include/i386-linux-gnu/bits/pthreadtypes.h /usr/include/alloca.h \ ccan/list/list.h /usr/lib/gcc/i686-linux-gnu/4.5.4/include/stdbool.h \ /usr/include/assert.h ccan/container_of/container_of.h config.h \ 
ccan/check_type/check_type.h ntdb-1.0/lib/ccan/list/list.h000066400000000000000000000345531224151530700160640ustar00rootroot00000000000000/* Licensed under LGPLv2.1+ - see LICENSE file for details */ #ifndef CCAN_LIST_H #define CCAN_LIST_H #include #include #include #include /** * struct ccan_list_node - an entry in a doubly-linked list * @next: next entry (self if empty) * @prev: previous entry (self if empty) * * This is used as an entry in a linked list. * Example: * struct child { * const char *name; * // Linked list of all us children. * struct ccan_list_node list; * }; */ struct ccan_list_node { struct ccan_list_node *next, *prev; }; /** * struct ccan_list_head - the head of a doubly-linked list * @h: the ccan_list_head (containing next and prev pointers) * * This is used as the head of a linked list. * Example: * struct parent { * const char *name; * struct ccan_list_head children; * unsigned int num_children; * }; */ struct ccan_list_head { struct ccan_list_node n; }; /** * ccan_list_check - check head of a list for consistency * @h: the ccan_list_head * @abortstr: the location to print on aborting, or NULL. * * Because list_nodes have redundant information, consistency checking between * the back and forward links can be done. This is useful as a debugging check. * If @abortstr is non-NULL, that will be printed in a diagnostic if the list * is inconsistent, and the function will abort. * * Returns the list head if the list is consistent, NULL if not (it * can never return NULL if @abortstr is set). 
* * See also: ccan_list_check_node() * * Example: * static void dump_parent(struct parent *p) * { * struct child *c; * * printf("%s (%u children):\n", p->name, p->num_children); * ccan_list_check(&p->children, "bad child list"); * ccan_list_for_each(&p->children, c, list) * printf(" -> %s\n", c->name); * } */ struct ccan_list_head *ccan_list_check(const struct ccan_list_head *h, const char *abortstr); /** * ccan_list_check_node - check node of a list for consistency * @n: the ccan_list_node * @abortstr: the location to print on aborting, or NULL. * * Check consistency of the list node is in (it must be in one). * * See also: ccan_list_check() * * Example: * static void dump_child(const struct child *c) * { * ccan_list_check_node(&c->list, "bad child list"); * printf("%s\n", c->name); * } */ struct ccan_list_node *ccan_list_check_node(const struct ccan_list_node *n, const char *abortstr); #ifdef CCAN_LIST_DEBUG #define ccan_list_debug(h) ccan_list_check((h), __func__) #define ccan_list_debug_node(n) ccan_list_check_node((n), __func__) #else #define ccan_list_debug(h) (h) #define ccan_list_debug_node(n) (n) #endif /** * CCAN_LIST_HEAD_INIT - initializer for an empty ccan_list_head * @name: the name of the list. * * Explicit initializer for an empty list. * * See also: * CCAN_LIST_HEAD, ccan_list_head_init() * * Example: * static struct ccan_list_head my_list = CCAN_LIST_HEAD_INIT(my_list); */ #define CCAN_LIST_HEAD_INIT(name) { { &name.n, &name.n } } /** * CCAN_LIST_HEAD - define and initialize an empty ccan_list_head * @name: the name of the list. * * The CCAN_LIST_HEAD macro defines a ccan_list_head and initializes it to an empty * list. It can be prepended by "static" to define a static ccan_list_head. 
* * See also: * CCAN_LIST_HEAD_INIT, ccan_list_head_init() * * Example: * static CCAN_LIST_HEAD(my_global_list); */ #define CCAN_LIST_HEAD(name) \ struct ccan_list_head name = CCAN_LIST_HEAD_INIT(name) /** * ccan_list_head_init - initialize a ccan_list_head * @h: the ccan_list_head to set to the empty list * * Example: * ... * struct parent *parent = malloc(sizeof(*parent)); * * ccan_list_head_init(&parent->children); * parent->num_children = 0; */ static inline void ccan_list_head_init(struct ccan_list_head *h) { h->n.next = h->n.prev = &h->n; } /** * ccan_list_add - add an entry at the start of a linked list. * @h: the ccan_list_head to add the node to * @n: the ccan_list_node to add to the list. * * The ccan_list_node does not need to be initialized; it will be overwritten. * Example: * struct child *child = malloc(sizeof(*child)); * * child->name = "marvin"; * ccan_list_add(&parent->children, &child->list); * parent->num_children++; */ static inline void ccan_list_add(struct ccan_list_head *h, struct ccan_list_node *n) { n->next = h->n.next; n->prev = &h->n; h->n.next->prev = n; h->n.next = n; (void)ccan_list_debug(h); } /** * ccan_list_add_tail - add an entry at the end of a linked list. * @h: the ccan_list_head to add the node to * @n: the ccan_list_node to add to the list. * * The ccan_list_node does not need to be initialized; it will be overwritten. * Example: * ccan_list_add_tail(&parent->children, &child->list); * parent->num_children++; */ static inline void ccan_list_add_tail(struct ccan_list_head *h, struct ccan_list_node *n) { n->next = &h->n; n->prev = h->n.prev; h->n.prev->next = n; h->n.prev = n; (void)ccan_list_debug(h); } /** * ccan_list_empty - is a list empty? * @h: the ccan_list_head * * If the list is empty, returns true. 
* * Example: * assert(ccan_list_empty(&parent->children) == (parent->num_children == 0)); */ static inline bool ccan_list_empty(const struct ccan_list_head *h) { (void)ccan_list_debug(h); return h->n.next == &h->n; } /** * ccan_list_del - delete an entry from an (unknown) linked list. * @n: the ccan_list_node to delete from the list. * * Note that this leaves @n in an undefined state; it can be added to * another list, but not deleted again. * * See also: * ccan_list_del_from() * * Example: * ccan_list_del(&child->list); * parent->num_children--; */ static inline void ccan_list_del(struct ccan_list_node *n) { (void)ccan_list_debug_node(n); n->next->prev = n->prev; n->prev->next = n->next; #ifdef CCAN_LIST_DEBUG /* Catch use-after-del. */ n->next = n->prev = NULL; #endif } /** * ccan_list_del_from - delete an entry from a known linked list. * @h: the ccan_list_head the node is in. * @n: the ccan_list_node to delete from the list. * * This explicitly indicates which list a node is expected to be in, * which is better documentation and can catch more bugs. * * See also: ccan_list_del() * * Example: * ccan_list_del_from(&parent->children, &child->list); * parent->num_children--; */ static inline void ccan_list_del_from(struct ccan_list_head *h, struct ccan_list_node *n) { #ifdef CCAN_LIST_DEBUG { /* Thorough check: make sure it was in list! */ struct ccan_list_node *i; for (i = h->n.next; i != n; i = i->next) assert(i != &h->n); } #endif /* CCAN_LIST_DEBUG */ /* Quick test that catches a surprising number of bugs. */ assert(!ccan_list_empty(h)); ccan_list_del(n); } /** * ccan_list_entry - convert a ccan_list_node back into the structure containing it. * @n: the ccan_list_node * @type: the type of the entry * @member: the ccan_list_node member of the type * * Example: * // First list entry is children.next; convert back to child. 
* child = ccan_list_entry(parent->children.n.next, struct child, list); * * See Also: * ccan_list_top(), ccan_list_for_each() */ #define ccan_list_entry(n, type, member) container_of(n, type, member) /** * ccan_list_top - get the first entry in a list * @h: the ccan_list_head * @type: the type of the entry * @member: the ccan_list_node member of the type * * If the list is empty, returns NULL. * * Example: * struct child *first; * first = ccan_list_top(&parent->children, struct child, list); */ #define ccan_list_top(h, type, member) \ ((type *)ccan_list_top_((h), ccan_list_off_(type, member))) static inline const void *ccan_list_top_(const struct ccan_list_head *h, size_t off) { if (ccan_list_empty(h)) return NULL; return (const char *)h->n.next - off; } /** * ccan_list_tail - get the last entry in a list * @h: the ccan_list_head * @type: the type of the entry * @member: the ccan_list_node member of the type * * If the list is empty, returns NULL. * * Example: * struct child *last; * last = ccan_list_tail(&parent->children, struct child, list); */ #define ccan_list_tail(h, type, member) \ ((type *)ccan_list_tail_((h), ccan_list_off_(type, member))) static inline const void *ccan_list_tail_(const struct ccan_list_head *h, size_t off) { if (ccan_list_empty(h)) return NULL; return (const char *)h->n.prev - off; } /** * ccan_list_for_each - iterate through a list. * @h: the ccan_list_head (warning: evaluated multiple times!) * @i: the structure containing the ccan_list_node * @member: the ccan_list_node member of the structure * * This is a convenient wrapper to iterate @i over the entire list. It's * a for loop, so you can break and continue as normal. * * Example: * ccan_list_for_each(&parent->children, child, list) * printf("Name: %s\n", child->name); */ #define ccan_list_for_each(h, i, member) \ ccan_list_for_each_off(h, i, ccan_list_off_var_(i, member)) /** * ccan_list_for_each_rev - iterate through a list backwards. 
* @h: the ccan_list_head * @i: the structure containing the ccan_list_node * @member: the ccan_list_node member of the structure * * This is a convenient wrapper to iterate @i over the entire list. It's * a for loop, so you can break and continue as normal. * * Example: * ccan_list_for_each_rev(&parent->children, child, list) * printf("Name: %s\n", child->name); */ #define ccan_list_for_each_rev(h, i, member) \ for (i = container_of_var(ccan_list_debug(h)->n.prev, i, member); \ &i->member != &(h)->n; \ i = container_of_var(i->member.prev, i, member)) /** * ccan_list_for_each_safe - iterate through a list, maybe during deletion * @h: the ccan_list_head * @i: the structure containing the ccan_list_node * @nxt: the structure containing the ccan_list_node * @member: the ccan_list_node member of the structure * * This is a convenient wrapper to iterate @i over the entire list. It's * a for loop, so you can break and continue as normal. The extra variable * @nxt is used to hold the next element, so you can delete @i from the list. * * Example: * struct child *next; * ccan_list_for_each_safe(&parent->children, child, next, list) { * ccan_list_del(&child->list); * parent->num_children--; * } */ #define ccan_list_for_each_safe(h, i, nxt, member) \ ccan_list_for_each_safe_off(h, i, nxt, ccan_list_off_var_(i, member)) /** * ccan_list_for_each_off - iterate through a list of memory regions. * @h: the ccan_list_head * @i: the pointer to a memory region wich contains list node data. * @off: offset(relative to @i) at which list node data resides. * * This is a low-level wrapper to iterate @i over the entire list, used to * implement all oher, more high-level, for-each constructs. It's a for loop, * so you can break and continue as normal. * * WARNING! Being the low-level macro that it is, this wrapper doesn't know * nor care about the type of @i. 
The only assumtion made is that @i points * to a chunk of memory that at some @offset, relative to @i, contains a * properly filled `struct node_list' which in turn contains pointers to * memory chunks and it's turtles all the way down. Whith all that in mind * remember that given the wrong pointer/offset couple this macro will * happilly churn all you memory untill SEGFAULT stops it, in other words * caveat emptor. * * It is worth mentioning that one of legitimate use-cases for that wrapper * is operation on opaque types with known offset for `struct ccan_list_node' * member(preferably 0), because it allows you not to disclose the type of * @i. * * Example: * ccan_list_for_each_off(&parent->children, child, * offsetof(struct child, list)) * printf("Name: %s\n", child->name); */ #define ccan_list_for_each_off(h, i, off) \ for (i = ccan_list_node_to_off_(ccan_list_debug(h)->n.next, (off)); \ ccan_list_node_from_off_((void *)i, (off)) != &(h)->n; \ i = ccan_list_node_to_off_(ccan_list_node_from_off_((void *)i, (off))->next, \ (off))) /** * ccan_list_for_each_safe_off - iterate through a list of memory regions, maybe * during deletion * @h: the ccan_list_head * @i: the pointer to a memory region wich contains list node data. * @nxt: the structure containing the ccan_list_node * @off: offset(relative to @i) at which list node data resides. * * For details see `ccan_list_for_each_off' and `ccan_list_for_each_safe' * descriptions. * * Example: * ccan_list_for_each_safe_off(&parent->children, child, * next, offsetof(struct child, list)) * printf("Name: %s\n", child->name); */ #define ccan_list_for_each_safe_off(h, i, nxt, off) \ for (i = ccan_list_node_to_off_(ccan_list_debug(h)->n.next, (off)), \ nxt = ccan_list_node_to_off_(ccan_list_node_from_off_(i, (off))->next, \ (off)); \ ccan_list_node_from_off_(i, (off)) != &(h)->n; \ i = nxt, \ nxt = ccan_list_node_to_off_(ccan_list_node_from_off_(i, (off))->next, \ (off))) /* Other -off variants. 
*/ #define ccan_list_entry_off(n, type, off) \ ((type *)ccan_list_node_from_off_((n), (off))) #define ccan_list_head_off(h, type, off) \ ((type *)ccan_list_head_off((h), (off))) #define ccan_list_tail_off(h, type, off) \ ((type *)ccan_list_tail_((h), (off))) #define ccan_list_add_off(h, n, off) \ ccan_list_add((h), ccan_list_node_from_off_((n), (off))) #define ccan_list_del_off(n, off) \ ccan_list_del(ccan_list_node_from_off_((n), (off))) #define ccan_list_del_from_off(h, n, off) \ ccan_list_del_from(h, ccan_list_node_from_off_((n), (off))) /* Offset helper functions so we only single-evaluate. */ static inline void *ccan_list_node_to_off_(struct ccan_list_node *node, size_t off) { return (void *)((char *)node - off); } static inline struct ccan_list_node *ccan_list_node_from_off_(void *ptr, size_t off) { return (struct ccan_list_node *)((char *)ptr + off); } /* Get the offset of the member, but make sure it's a ccan_list_node. */ #define ccan_list_off_(type, member) \ (container_off(type, member) + \ check_type(((type *)0)->member, struct ccan_list_node)) #define ccan_list_off_var_(var, member) \ (container_off_var(var, member) + \ check_type(var->member, struct ccan_list_node)) #endif /* CCAN_LIST_H */ ntdb-1.0/lib/ccan/list/test/000077500000000000000000000000001224151530700157055ustar00rootroot00000000000000ntdb-1.0/lib/ccan/list/test/compile_ok-constant.c000066400000000000000000000016601224151530700220240ustar00rootroot00000000000000#include #include #include #include #include struct child { const char *name; struct ccan_list_node list; }; static bool children(const struct ccan_list_head *list) { return !ccan_list_empty(list); } static const struct child *first_child(const struct ccan_list_head *list) { return ccan_list_top(list, struct child, list); } static const struct child *last_child(const struct ccan_list_head *list) { return ccan_list_tail(list, struct child, list); } static void check_children(const struct ccan_list_head *list) { ccan_list_check(list, 
"bad child list"); } static void print_children(const struct ccan_list_head *list) { const struct child *c; ccan_list_for_each(list, c, list) printf("%s\n", c->name); } int main(void) { CCAN_LIST_HEAD(h); children(&h); first_child(&h); last_child(&h); check_children(&h); print_children(&h); return 0; } ntdb-1.0/lib/ccan/list/test/helper.c000066400000000000000000000024311224151530700173300ustar00rootroot00000000000000#include #include #include #include #include "helper.h" #define ANSWER_TO_THE_ULTIMATE_QUESTION_OF_LIFE_THE_UNIVERSE_AND_EVERYTHING \ (42) struct opaque { struct ccan_list_node list; size_t secret_offset; char secret_drawer[42]; }; static bool not_randomized = true; struct opaque *create_opaque_blob(void) { struct opaque *blob = calloc(1, sizeof(struct opaque)); if (not_randomized) { srandom((int)time(NULL)); not_randomized = false; } blob->secret_offset = random() % (sizeof(blob->secret_drawer)); blob->secret_drawer[blob->secret_offset] = ANSWER_TO_THE_ULTIMATE_QUESTION_OF_LIFE_THE_UNIVERSE_AND_EVERYTHING; return blob; } bool if_blobs_know_the_secret(struct opaque *blob) { bool answer = true; int i; for (i = 0; i < sizeof(blob->secret_drawer) / sizeof(blob->secret_drawer[0]); i++) if (i != blob->secret_offset) answer = answer && (blob->secret_drawer[i] == 0); else answer = answer && (blob->secret_drawer[blob->secret_offset] == ANSWER_TO_THE_ULTIMATE_QUESTION_OF_LIFE_THE_UNIVERSE_AND_EVERYTHING); return answer; } void destroy_opaque_blob(struct opaque *blob) { free(blob); } ntdb-1.0/lib/ccan/list/test/helper.h000066400000000000000000000003671224151530700173430ustar00rootroot00000000000000/* These are in a separate C file so we can test undefined structures. 
*/ struct opaque; typedef struct opaque opaque_t; opaque_t *create_opaque_blob(void); bool if_blobs_know_the_secret(opaque_t *blob); void destroy_opaque_blob(opaque_t *blob); ntdb-1.0/lib/ccan/list/test/run-check-corrupt.c000066400000000000000000000041171224151530700214270ustar00rootroot00000000000000#include #include #include #include #include #include /* We don't actually want it to exit... */ static jmp_buf aborted; #define abort() longjmp(aborted, 1) #define fprintf my_fprintf static char printf_buffer[1000]; static int my_fprintf(FILE *stream, const char *format, ...) { va_list ap; int ret; va_start(ap, format); ret = vsprintf(printf_buffer, format, ap); va_end(ap); return ret; } #include #include #include int main(int argc, char *argv[]) { struct ccan_list_head list; struct ccan_list_node n1; char expect[100]; plan_tests(9); /* Empty list. */ list.n.next = &list.n; list.n.prev = &list.n; ok1(ccan_list_check(&list, NULL) == &list); /* Bad back ptr */ list.n.prev = &n1; /* Non-aborting version. */ ok1(ccan_list_check(&list, NULL) == NULL); /* Aborting version. */ sprintf(expect, "test message: prev corrupt in node %p (0) of %p\n", &list, &list); if (setjmp(aborted) == 0) { ccan_list_check(&list, "test message"); fail("ccan_list_check on empty with bad back ptr didn't fail!"); } else { ok1(strcmp(printf_buffer, expect) == 0); } /* n1 in list. */ list.n.next = &n1; list.n.prev = &n1; n1.prev = &list.n; n1.next = &list.n; ok1(ccan_list_check(&list, NULL) == &list); ok1(ccan_list_check_node(&n1, NULL) == &n1); /* Bad back ptr */ n1.prev = &n1; ok1(ccan_list_check(&list, NULL) == NULL); ok1(ccan_list_check_node(&n1, NULL) == NULL); /* Aborting version. 
*/ sprintf(expect, "test message: prev corrupt in node %p (1) of %p\n", &n1, &list); if (setjmp(aborted) == 0) { ccan_list_check(&list, "test message"); fail("ccan_list_check on n1 bad back ptr didn't fail!"); } else { ok1(strcmp(printf_buffer, expect) == 0); } sprintf(expect, "test message: prev corrupt in node %p (0) of %p\n", &n1, &n1); if (setjmp(aborted) == 0) { ccan_list_check_node(&n1, "test message"); fail("ccan_list_check_node on n1 bad back ptr didn't fail!"); } else { ok1(strcmp(printf_buffer, expect) == 0); } return exit_status(); } ntdb-1.0/lib/ccan/list/test/run-list_del_from-assert.c000066400000000000000000000013561224151530700230010ustar00rootroot00000000000000#define CCAN_LIST_DEBUG 1 #include #include #include #include #include #include #include int main(int argc, char *argv[]) { struct ccan_list_head list1, list2; struct ccan_list_node n1, n2, n3; pid_t child; int status; plan_tests(1); ccan_list_head_init(&list1); ccan_list_head_init(&list2); ccan_list_add(&list1, &n1); ccan_list_add(&list2, &n2); ccan_list_add_tail(&list2, &n3); child = fork(); if (child) { wait(&status); } else { /* This should abort. */ ccan_list_del_from(&list1, &n3); exit(0); } ok1(WIFSIGNALED(status) && WTERMSIG(status) == SIGABRT); ccan_list_del_from(&list2, &n3); return exit_status(); } ntdb-1.0/lib/ccan/list/test/run-single-eval.c000066400000000000000000000116061224151530700210650ustar00rootroot00000000000000/* Make sure macros only evaluate their args once. 
*/ #include #include #include struct parent { const char *name; struct ccan_list_head children; unsigned int num_children; int eval_count; }; struct child { const char *name; struct ccan_list_node list; }; static CCAN_LIST_HEAD(static_list); #define ref(obj, counter) ((counter)++, (obj)) int main(int argc, char *argv[]) { struct parent parent; struct child c1, c2, c3, *c, *n; unsigned int i; unsigned int static_count = 0, parent_count = 0, list_count = 0, node_count = 0; struct ccan_list_head list = CCAN_LIST_HEAD_INIT(list); plan_tests(74); /* Test CCAN_LIST_HEAD, CCAN_LIST_HEAD_INIT, ccan_list_empty and check_list */ ok1(ccan_list_empty(ref(&static_list, static_count))); ok1(static_count == 1); ok1(ccan_list_check(ref(&static_list, static_count), NULL)); ok1(static_count == 2); ok1(ccan_list_empty(ref(&list, list_count))); ok1(list_count == 1); ok1(ccan_list_check(ref(&list, list_count), NULL)); ok1(list_count == 2); parent.num_children = 0; ccan_list_head_init(ref(&parent.children, parent_count)); ok1(parent_count == 1); /* Test ccan_list_head_init */ ok1(ccan_list_empty(ref(&parent.children, parent_count))); ok1(parent_count == 2); ok1(ccan_list_check(ref(&parent.children, parent_count), NULL)); ok1(parent_count == 3); c2.name = "c2"; ccan_list_add(ref(&parent.children, parent_count), &c2.list); ok1(parent_count == 4); /* Test ccan_list_add and !ccan_list_empty. */ ok1(!ccan_list_empty(ref(&parent.children, parent_count))); ok1(parent_count == 5); ok1(c2.list.next == &parent.children.n); ok1(c2.list.prev == &parent.children.n); ok1(parent.children.n.next == &c2.list); ok1(parent.children.n.prev == &c2.list); /* Test ccan_list_check */ ok1(ccan_list_check(ref(&parent.children, parent_count), NULL)); ok1(parent_count == 6); c1.name = "c1"; ccan_list_add(ref(&parent.children, parent_count), &c1.list); ok1(parent_count == 7); /* Test ccan_list_add and !ccan_list_empty. 
*/ ok1(!ccan_list_empty(ref(&parent.children, parent_count))); ok1(parent_count == 8); ok1(c2.list.next == &parent.children.n); ok1(c2.list.prev == &c1.list); ok1(parent.children.n.next == &c1.list); ok1(parent.children.n.prev == &c2.list); ok1(c1.list.next == &c2.list); ok1(c1.list.prev == &parent.children.n); /* Test ccan_list_check */ ok1(ccan_list_check(ref(&parent.children, parent_count), NULL)); ok1(parent_count == 9); c3.name = "c3"; ccan_list_add_tail(ref(&parent.children, parent_count), &c3.list); ok1(parent_count == 10); /* Test ccan_list_add_tail and !ccan_list_empty. */ ok1(!ccan_list_empty(ref(&parent.children, parent_count))); ok1(parent_count == 11); ok1(parent.children.n.next == &c1.list); ok1(parent.children.n.prev == &c3.list); ok1(c1.list.next == &c2.list); ok1(c1.list.prev == &parent.children.n); ok1(c2.list.next == &c3.list); ok1(c2.list.prev == &c1.list); ok1(c3.list.next == &parent.children.n); ok1(c3.list.prev == &c2.list); /* Test ccan_list_check */ ok1(ccan_list_check(ref(&parent.children, parent_count), NULL)); ok1(parent_count == 12); /* Test ccan_list_check_node */ ok1(ccan_list_check_node(&c1.list, NULL)); ok1(ccan_list_check_node(&c2.list, NULL)); ok1(ccan_list_check_node(&c3.list, NULL)); /* Test ccan_list_top */ ok1(ccan_list_top(ref(&parent.children, parent_count), struct child, list) == &c1); ok1(parent_count == 13); /* Test ccan_list_tail */ ok1(ccan_list_tail(ref(&parent.children, parent_count), struct child, list) == &c3); ok1(parent_count == 14); /* Test ccan_list_for_each. */ i = 0; ccan_list_for_each(&parent.children, c, list) { switch (i++) { case 0: ok1(c == &c1); break; case 1: ok1(c == &c2); break; case 2: ok1(c == &c3); break; } if (i > 2) break; } ok1(i == 3); /* Test ccan_list_for_each_safe, ccan_list_del and ccan_list_del_from. 
*/ i = 0; ccan_list_for_each_safe(&parent.children, c, n, list) { switch (i++) { case 0: ok1(c == &c1); ccan_list_del(ref(&c->list, node_count)); ok1(node_count == 1); break; case 1: ok1(c == &c2); ccan_list_del_from(ref(&parent.children, parent_count), ref(&c->list, node_count)); ok1(node_count == 2); break; case 2: ok1(c == &c3); ccan_list_del_from(ref(&parent.children, parent_count), ref(&c->list, node_count)); ok1(node_count == 3); break; } ok1(ccan_list_check(ref(&parent.children, parent_count), NULL)); if (i > 2) break; } ok1(i == 3); ok1(parent_count == 19); ok1(ccan_list_empty(ref(&parent.children, parent_count))); ok1(parent_count == 20); /* Test ccan_list_top/ccan_list_tail on empty list. */ ok1(ccan_list_top(ref(&parent.children, parent_count), struct child, list) == NULL); ok1(parent_count == 21); ok1(ccan_list_tail(ref(&parent.children, parent_count), struct child, list) == NULL); ok1(parent_count == 22); return exit_status(); } ntdb-1.0/lib/ccan/list/test/run-with-debug.c000066400000000000000000000001641224151530700207130ustar00rootroot00000000000000/* Just like run.c, but with all debug checks enabled. 
*/ #define CCAN_LIST_DEBUG 1 #include ntdb-1.0/lib/ccan/list/test/run.c000066400000000000000000000117521224151530700166630ustar00rootroot00000000000000#include #include #include #include "helper.h" struct parent { const char *name; struct ccan_list_head children; unsigned int num_children; }; struct child { const char *name; struct ccan_list_node list; }; static CCAN_LIST_HEAD(static_list); int main(int argc, char *argv[]) { struct parent parent; struct child c1, c2, c3, *c, *n; unsigned int i; struct ccan_list_head list = CCAN_LIST_HEAD_INIT(list); opaque_t *q, *nq; struct ccan_list_head opaque_list = CCAN_LIST_HEAD_INIT(opaque_list); plan_tests(65); /* Test CCAN_LIST_HEAD, CCAN_LIST_HEAD_INIT, ccan_list_empty and check_list */ ok1(ccan_list_empty(&static_list)); ok1(ccan_list_check(&static_list, NULL)); ok1(ccan_list_empty(&list)); ok1(ccan_list_check(&list, NULL)); parent.num_children = 0; ccan_list_head_init(&parent.children); /* Test ccan_list_head_init */ ok1(ccan_list_empty(&parent.children)); ok1(ccan_list_check(&parent.children, NULL)); c2.name = "c2"; ccan_list_add(&parent.children, &c2.list); /* Test ccan_list_add and !ccan_list_empty. */ ok1(!ccan_list_empty(&parent.children)); ok1(c2.list.next == &parent.children.n); ok1(c2.list.prev == &parent.children.n); ok1(parent.children.n.next == &c2.list); ok1(parent.children.n.prev == &c2.list); /* Test ccan_list_check */ ok1(ccan_list_check(&parent.children, NULL)); c1.name = "c1"; ccan_list_add(&parent.children, &c1.list); /* Test ccan_list_add and !ccan_list_empty. 
*/ ok1(!ccan_list_empty(&parent.children)); ok1(c2.list.next == &parent.children.n); ok1(c2.list.prev == &c1.list); ok1(parent.children.n.next == &c1.list); ok1(parent.children.n.prev == &c2.list); ok1(c1.list.next == &c2.list); ok1(c1.list.prev == &parent.children.n); /* Test ccan_list_check */ ok1(ccan_list_check(&parent.children, NULL)); c3.name = "c3"; ccan_list_add_tail(&parent.children, &c3.list); /* Test ccan_list_add_tail and !ccan_list_empty. */ ok1(!ccan_list_empty(&parent.children)); ok1(parent.children.n.next == &c1.list); ok1(parent.children.n.prev == &c3.list); ok1(c1.list.next == &c2.list); ok1(c1.list.prev == &parent.children.n); ok1(c2.list.next == &c3.list); ok1(c2.list.prev == &c1.list); ok1(c3.list.next == &parent.children.n); ok1(c3.list.prev == &c2.list); /* Test ccan_list_check */ ok1(ccan_list_check(&parent.children, NULL)); /* Test ccan_list_check_node */ ok1(ccan_list_check_node(&c1.list, NULL)); ok1(ccan_list_check_node(&c2.list, NULL)); ok1(ccan_list_check_node(&c3.list, NULL)); /* Test ccan_list_top */ ok1(ccan_list_top(&parent.children, struct child, list) == &c1); /* Test ccan_list_tail */ ok1(ccan_list_tail(&parent.children, struct child, list) == &c3); /* Test ccan_list_for_each. */ i = 0; ccan_list_for_each(&parent.children, c, list) { switch (i++) { case 0: ok1(c == &c1); break; case 1: ok1(c == &c2); break; case 2: ok1(c == &c3); break; } if (i > 2) break; } ok1(i == 3); /* Test ccan_list_for_each_rev. */ i = 0; ccan_list_for_each_rev(&parent.children, c, list) { switch (i++) { case 0: ok1(c == &c3); break; case 1: ok1(c == &c2); break; case 2: ok1(c == &c1); break; } if (i > 2) break; } ok1(i == 3); /* Test ccan_list_for_each_safe, ccan_list_del and ccan_list_del_from. 
*/ i = 0; ccan_list_for_each_safe(&parent.children, c, n, list) { switch (i++) { case 0: ok1(c == &c1); ccan_list_del(&c->list); break; case 1: ok1(c == &c2); ccan_list_del_from(&parent.children, &c->list); break; case 2: ok1(c == &c3); ccan_list_del_from(&parent.children, &c->list); break; } ok1(ccan_list_check(&parent.children, NULL)); if (i > 2) break; } ok1(i == 3); ok1(ccan_list_empty(&parent.children)); /* Test ccan_list_for_each_off. */ ccan_list_add_tail(&opaque_list, (struct ccan_list_node *)create_opaque_blob()); ccan_list_add_tail(&opaque_list, (struct ccan_list_node *)create_opaque_blob()); ccan_list_add_tail(&opaque_list, (struct ccan_list_node *)create_opaque_blob()); i = 0; ccan_list_for_each_off(&opaque_list, q, 0) { i++; ok1(if_blobs_know_the_secret(q)); } ok1(i == 3); /* Test ccan_list_for_each_safe_off, ccan_list_del_off and ccan_list_del_from_off. */ i = 0; ccan_list_for_each_safe_off(&opaque_list, q, nq, 0) { switch (i++) { case 0: ok1(if_blobs_know_the_secret(q)); ccan_list_del_off(q, 0); destroy_opaque_blob(q); break; case 1: ok1(if_blobs_know_the_secret(q)); ccan_list_del_from_off(&opaque_list, q, 0); destroy_opaque_blob(q); break; case 2: ok1(c == &c3); ccan_list_del_from_off(&opaque_list, q, 0); destroy_opaque_blob(q); break; } ok1(ccan_list_check(&opaque_list, NULL)); if (i > 2) break; } ok1(i == 3); ok1(ccan_list_empty(&opaque_list)); /* Test ccan_list_top/ccan_list_tail on empty list. */ ok1(ccan_list_top(&parent.children, struct child, list) == NULL); ok1(ccan_list_tail(&parent.children, struct child, list) == NULL); return exit_status(); } ntdb-1.0/lib/ccan/read_write_all/000077500000000000000000000000001224151530700167305ustar00rootroot00000000000000ntdb-1.0/lib/ccan/read_write_all/LICENSE000066400000000000000000000636351224151530700177520ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. 
If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. 
We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. 
This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. 
You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. 
If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. 
You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. 
If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. 
(It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. 
You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. 
You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. 
If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! ntdb-1.0/lib/ccan/read_write_all/_info000066400000000000000000000017411224151530700177500ustar00rootroot00000000000000#include #include #include "config.h" /** * read_write_all - read_all and write_all routines. * * Successful read and write calls may only partly complete if a * signal is received or they are not operating on a normal file. * * read_all() and write_all() do the looping for you. 
* * Example: * #include * #include * #include * #include * * #define BUFFER_SIZE 10 * int main(int argc, char *argv[]) * { * char buffer[BUFFER_SIZE+1]; * * if (!read_all(STDIN_FILENO, buffer, BUFFER_SIZE)) * err(1, "Could not read %u characters", BUFFER_SIZE); * buffer[BUFFER_SIZE] = '\0'; * printf("I read '%.*s'\n", BUFFER_SIZE, buffer); * return 0; * } * * License: LGPL (v2.1 or any later version) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { return 0; } return 1; } ntdb-1.0/lib/ccan/read_write_all/read_write_all.c000066400000000000000000000012271224151530700220530ustar00rootroot00000000000000/* Licensed under LGPLv2+ - see LICENSE file for details */ #include "read_write_all.h" #include #include bool write_all(int fd, const void *data, size_t size) { while (size) { ssize_t done; done = write(fd, data, size); if (done < 0 && errno == EINTR) continue; if (done <= 0) return false; data = (const char *)data + done; size -= done; } return true; } bool read_all(int fd, void *data, size_t size) { while (size) { ssize_t done; done = read(fd, data, size); if (done < 0 && errno == EINTR) continue; if (done <= 0) return false; data = (char *)data + done; size -= done; } return true; } ntdb-1.0/lib/ccan/read_write_all/read_write_all.d000066400000000000000000000017121224151530700220530ustar00rootroot00000000000000ccan/read_write_all/read_write_all.o: \ ccan/read_write_all/read_write_all.c \ ccan/read_write_all/read_write_all.h \ /usr/lib/gcc/i686-linux-gnu/4.5.4/include/stddef.h \ /usr/lib/gcc/i686-linux-gnu/4.5.4/include/stdbool.h \ /usr/include/unistd.h /usr/include/features.h \ /usr/include/i386-linux-gnu/bits/predefs.h \ /usr/include/i386-linux-gnu/sys/cdefs.h \ /usr/include/i386-linux-gnu/bits/wordsize.h \ /usr/include/i386-linux-gnu/gnu/stubs.h \ /usr/include/i386-linux-gnu/gnu/stubs-32.h \ /usr/include/i386-linux-gnu/bits/posix_opt.h \ /usr/include/i386-linux-gnu/bits/environments.h \ 
/usr/include/i386-linux-gnu/bits/types.h \ /usr/include/i386-linux-gnu/bits/typesizes.h \ /usr/include/i386-linux-gnu/bits/confname.h /usr/include/getopt.h \ /usr/include/errno.h /usr/include/i386-linux-gnu/bits/errno.h \ /usr/include/linux/errno.h /usr/include/i386-linux-gnu/asm/errno.h \ /usr/include/asm-generic/errno.h /usr/include/asm-generic/errno-base.h ntdb-1.0/lib/ccan/read_write_all/read_write_all.h000066400000000000000000000004441224151530700220600ustar00rootroot00000000000000/* Licensed under LGPLv2+ - see LICENSE file for details */ #ifndef _CCAN_READ_WRITE_H #define _CCAN_READ_WRITE_H #include #include bool write_all(int fd, const void *data, size_t size); bool read_all(int fd, void *data, size_t size); #endif /* _CCAN_READ_WRITE_H */ ntdb-1.0/lib/ccan/read_write_all/test/000077500000000000000000000000001224151530700177075ustar00rootroot00000000000000ntdb-1.0/lib/ccan/read_write_all/test/run-read_all.c000066400000000000000000000033511224151530700224220ustar00rootroot00000000000000/* FIXME: Do something tricky to ensure we really do loop in read_all. */ #include #include #include #include #include #include #include #include #include #include #include static volatile int sigcount; static int p2c[2], c2p[2]; static void got_signal(int sig) { char c = 0; if (write(p2c[1], &c, 1) == 1) sigcount++; } /* < PIPE_BUF *will* be atomic. But > PIPE_BUF only *might* be non-atomic. */ #define BUFSZ (1024*1024) int main(int argc, char *argv[]) { char *buffer; char c = 0; int status; pid_t child; buffer = calloc(BUFSZ, 2); plan_tests(6); /* We fork and torture parent. */ if (pipe(p2c) != 0 || pipe(c2p) != 0) err(1, "pipe"); child = fork(); if (!child) { close(p2c[1]); close(c2p[0]); /* Child. Make sure parent ready, then write in two parts. */ if (read(p2c[0], &c, 1) != 1) exit(1); memset(buffer, 0xff, BUFSZ*2); if (!write_all(c2p[1], buffer, BUFSZ)) exit(2); if (kill(getppid(), SIGUSR1) != 0) exit(3); /* Make sure they get signal. 
*/ if (read(p2c[0], &c, 1) != 1) exit(4); if (write(c2p[1], buffer, BUFSZ) != BUFSZ) exit(5); exit(0); } if (child == -1) err(1, "forking"); close(p2c[0]); close(c2p[1]); signal(SIGUSR1, got_signal); ok1(write(p2c[1], &c, 1) == 1); ok1(read_all(c2p[0], buffer, BUFSZ*2)); ok1(memchr(buffer, 0, BUFSZ*2) == NULL); ok1(sigcount == 1); ok1(wait(&status) == child); ok(WIFEXITED(status) && WEXITSTATUS(status) == 0, "WIFEXITED(status) = %u, WEXITSTATUS(status) = %u", WIFEXITED(status), WEXITSTATUS(status)); free(buffer); return exit_status(); } ntdb-1.0/lib/ccan/read_write_all/test/run-write_all.c000066400000000000000000000024011224151530700226340ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #include #include static ssize_t test_write(int fd, const void *buf, size_t count); #define write test_write #include #undef write static ssize_t write_return; static ssize_t test_write(int fd, const void *buf, size_t count) { if (write_return == 0) { errno = ENOSPC; return 0; } if (write_return < 0) { errno = -write_return; /* Don't return EINTR more than once! 
*/ if (errno == EINTR) write_return = count; return -1; } if (write_return < count) return write_return; return count; } #define BUFSZ 1024 int main(int argc, char *argv[]) { char *buffer; buffer = malloc(BUFSZ); plan_tests(8); write_return = -ENOSPC; ok1(!write_all(100, buffer, BUFSZ)); ok1(errno == ENOSPC); write_return = -EINTR; ok1(write_all(100, buffer, BUFSZ)); ok1(errno == EINTR); write_return = 1; errno = 0; ok1(write_all(100, buffer, BUFSZ)); ok1(errno == 0); write_return = BUFSZ; ok1(write_all(100, buffer, BUFSZ)); ok1(errno == 0); free(buffer); return exit_status(); } ntdb-1.0/lib/ccan/str/000077500000000000000000000000001224151530700145635ustar00rootroot00000000000000ntdb-1.0/lib/ccan/str/_info000066400000000000000000000025021224151530700155770ustar00rootroot00000000000000#include #include #include "config.h" /** * str - string helper routines * * This is a grab bag of functions for string operations, designed to enhance * the standard string.h. * * Note that if you define CCAN_STR_DEBUG, you will get extra compile * checks on common misuses of the following functions (they will now * be out-of-line, so there is a runtime penalty!). * * strstr, strchr, strrchr: * Return const char * if first argument is const (gcc only). * * isalnum, isalpha, isascii, isblank, iscntrl, isdigit, isgraph, * islower, isprint, ispunct, isspace, isupper, isxdigit: * Static and runtime check that input is EOF or an *unsigned* * char, as per C standard (really!). 
* * Example: * #include * #include * * int main(int argc, char *argv[]) * { * if (argv[1] && streq(argv[1], "--verbose")) * printf("verbose set\n"); * if (argv[1] && strstarts(argv[1], "--")) * printf("Some option set\n"); * if (argv[1] && strends(argv[1], "cow-powers")) * printf("Magic option set\n"); * return 0; * } * * License: Public domain * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/build_assert\n"); return 0; } return 1; } ntdb-1.0/lib/ccan/str/debug.c000066400000000000000000000030111224151530700160100ustar00rootroot00000000000000#include "config.h" #include #include #include #include #ifdef CCAN_STR_DEBUG /* Because we mug the real ones with macros, we need our own wrappers. */ int str_isalnum(int i) { assert(i >= -1 && i < 256); return isalnum(i); } int str_isalpha(int i) { assert(i >= -1 && i < 256); return isalpha(i); } int str_isascii(int i) { assert(i >= -1 && i < 256); return isascii(i); } #if HAVE_ISBLANK int str_isblank(int i) { assert(i >= -1 && i < 256); return isblank(i); } #endif int str_iscntrl(int i) { assert(i >= -1 && i < 256); return iscntrl(i); } int str_isdigit(int i) { assert(i >= -1 && i < 256); return isdigit(i); } int str_isgraph(int i) { assert(i >= -1 && i < 256); return isgraph(i); } int str_islower(int i) { assert(i >= -1 && i < 256); return islower(i); } int str_isprint(int i) { assert(i >= -1 && i < 256); return isprint(i); } int str_ispunct(int i) { assert(i >= -1 && i < 256); return ispunct(i); } int str_isspace(int i) { assert(i >= -1 && i < 256); return isspace(i); } int str_isupper(int i) { assert(i >= -1 && i < 256); return isupper(i); } int str_isxdigit(int i) { assert(i >= -1 && i < 256); return isxdigit(i); } #undef strstr #undef strchr #undef strrchr char *str_strstr(const char *haystack, const char *needle) { return strstr(haystack, needle); } char *str_strchr(const char *haystack, int c) { return strchr(haystack, c); } char 
*str_strrchr(const char *haystack, int c) { return strrchr(haystack, c); } #endif ntdb-1.0/lib/ccan/str/str.c000066400000000000000000000003421224151530700155360ustar00rootroot00000000000000#include size_t strcount(const char *haystack, const char *needle) { size_t i = 0, nlen = strlen(needle); while ((haystack = strstr(haystack, needle)) != NULL) { i++; haystack += nlen; } return i; } ntdb-1.0/lib/ccan/str/str.h000066400000000000000000000117411224151530700155500ustar00rootroot00000000000000/* Placed into the public domain. */ #ifndef CCAN_STR_H #define CCAN_STR_H #include "config.h" #include #include #include /** * streq - Are two strings equal? * @a: first string * @b: first string * * This macro is arguably more readable than "!strcmp(a, b)". * * Example: * if (streq(somestring, "")) * printf("String is empty!\n"); */ #define streq(a,b) (strcmp((a),(b)) == 0) /** * strstarts - Does this string start with this prefix? * @str: string to test * @prefix: prefix to look for at start of str * * Example: * if (strstarts(somestring, "foo")) * printf("String %s begins with 'foo'!\n", somestring); */ #define strstarts(str,prefix) (strncmp((str),(prefix),strlen(prefix)) == 0) /** * strends - Does this string end with this postfix? * @str: string to test * @postfix: postfix to look for at end of str * * Example: * if (strends(somestring, "foo")) * printf("String %s end with 'foo'!\n", somestring); */ static inline bool strends(const char *str, const char *postfix) { if (strlen(str) < strlen(postfix)) return false; return streq(str + strlen(str) - strlen(postfix), postfix); } /** * stringify - Turn expression into a string literal * @expr: any C expression * * Example: * #define PRINT_COND_IF_FALSE(cond) \ * ((cond) || printf("%s is false!", stringify(cond))) */ #define stringify(expr) stringify_1(expr) /* Double-indirection required to stringify expansions */ #define stringify_1(expr) #expr /** * strcount - Count number of (non-overlapping) occurrences of a substring. 
* @haystack: a C string * @needle: a substring * * Example: * int i; * i = strcount("aaa aaa", "a"); // i = 6; * i = strcount("aaa aaa", "ab"); // i = 0; * i = strcount("aaa aaa", "aa"); // i = 2; */ size_t strcount(const char *haystack, const char *needle); /** * cisalnum - isalnum() which takes a char (and doesn't accept EOF) * @c: a character * * Surprisingly, the standard ctype.h isalnum() takes an int, which * must have the value of EOF (-1) or an unsigned char. This variant * takes a real char, and doesn't accept EOF. */ static inline bool cisalnum(char c) { return isalnum((unsigned char)c); } static inline bool cisalpha(char c) { return isalpha((unsigned char)c); } static inline bool cisascii(char c) { return isascii((unsigned char)c); } #if HAVE_ISBLANK static inline bool cisblank(char c) { return isblank((unsigned char)c); } #endif static inline bool ciscntrl(char c) { return iscntrl((unsigned char)c); } static inline bool cisdigit(char c) { return isdigit((unsigned char)c); } static inline bool cisgraph(char c) { return isgraph((unsigned char)c); } static inline bool cislower(char c) { return islower((unsigned char)c); } static inline bool cisprint(char c) { return isprint((unsigned char)c); } static inline bool cispunct(char c) { return ispunct((unsigned char)c); } static inline bool cisspace(char c) { return isspace((unsigned char)c); } static inline bool cisupper(char c) { return isupper((unsigned char)c); } static inline bool cisxdigit(char c) { return isxdigit((unsigned char)c); } #include /* These checks force things out of line, hence they are under DEBUG. */ #ifdef CCAN_STR_DEBUG #include /* These are commonly misused: they take -1 or an *unsigned* char value. */ #undef isalnum #undef isalpha #undef isascii #undef isblank #undef iscntrl #undef isdigit #undef isgraph #undef islower #undef isprint #undef ispunct #undef isspace #undef isupper #undef isxdigit /* You can use a char if char is unsigned. 
*/ #if HAVE_BUILTIN_TYPES_COMPATIBLE_P && HAVE_TYPEOF #define str_check_arg_(i) \ ((i) + BUILD_ASSERT_OR_ZERO(!__builtin_types_compatible_p(typeof(i), \ char) \ || (char)255 > 0)) #else #define str_check_arg_(i) (i) #endif #define isalnum(i) str_isalnum(str_check_arg_(i)) #define isalpha(i) str_isalpha(str_check_arg_(i)) #define isascii(i) str_isascii(str_check_arg_(i)) #if HAVE_ISBLANK #define isblank(i) str_isblank(str_check_arg_(i)) #endif #define iscntrl(i) str_iscntrl(str_check_arg_(i)) #define isdigit(i) str_isdigit(str_check_arg_(i)) #define isgraph(i) str_isgraph(str_check_arg_(i)) #define islower(i) str_islower(str_check_arg_(i)) #define isprint(i) str_isprint(str_check_arg_(i)) #define ispunct(i) str_ispunct(str_check_arg_(i)) #define isspace(i) str_isspace(str_check_arg_(i)) #define isupper(i) str_isupper(str_check_arg_(i)) #define isxdigit(i) str_isxdigit(str_check_arg_(i)) #if HAVE_TYPEOF /* With GNU magic, we can make const-respecting standard string functions. */ #undef strstr #undef strchr #undef strrchr /* + 0 is needed to decay array into pointer. */ #define strstr(haystack, needle) \ ((typeof((haystack) + 0))str_strstr((haystack), (needle))) #define strchr(haystack, c) \ ((typeof((haystack) + 0))str_strchr((haystack), (c))) #define strrchr(haystack, c) \ ((typeof((haystack) + 0))str_strrchr((haystack), (c))) #endif #endif /* CCAN_STR_DEBUG */ #endif /* CCAN_STR_H */ ntdb-1.0/lib/ccan/str/str_debug.h000066400000000000000000000013151224151530700167120ustar00rootroot00000000000000#ifndef CCAN_STR_DEBUG_H #define CCAN_STR_DEBUG_H /* #define CCAN_STR_DEBUG 1 */ #ifdef CCAN_STR_DEBUG /* Because we mug the real ones with macros, we need our own wrappers. 
*/ int str_isalnum(int i); int str_isalpha(int i); int str_isascii(int i); #if HAVE_ISBLANK int str_isblank(int i); #endif int str_iscntrl(int i); int str_isdigit(int i); int str_isgraph(int i); int str_islower(int i); int str_isprint(int i); int str_ispunct(int i); int str_isspace(int i); int str_isupper(int i); int str_isxdigit(int i); char *str_strstr(const char *haystack, const char *needle); char *str_strchr(const char *s, int c); char *str_strrchr(const char *s, int c); #endif /* CCAN_STR_DEBUG */ #endif /* CCAN_STR_DEBUG_H */ ntdb-1.0/lib/ccan/str/test/000077500000000000000000000000001224151530700155425ustar00rootroot00000000000000ntdb-1.0/lib/ccan/str/test/compile_fail-isalnum.c000066400000000000000000000005611224151530700220010ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isalnum. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return isalnum(c); } ntdb-1.0/lib/ccan/str/test/compile_fail-isalpha.c000066400000000000000000000005611224151530700217520ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isalpha. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return isalpha(c); } ntdb-1.0/lib/ccan/str/test/compile_fail-isascii.c000066400000000000000000000005611224151530700217550ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isascii. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. 
*/ BUILD_ASSERT((char)255 < 0); #endif return isascii(c); } ntdb-1.0/lib/ccan/str/test/compile_fail-isblank.c000066400000000000000000000006531224151530700217560ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF || !HAVE_ISBLANK #error We need typeof to check isblank. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif #if HAVE_ISBLANK return isblank(c); #else return c; #endif } ntdb-1.0/lib/ccan/str/test/compile_fail-iscntrl.c000066400000000000000000000005611224151530700220070ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check iscntrl. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return iscntrl(c); } ntdb-1.0/lib/ccan/str/test/compile_fail-isdigit.c000066400000000000000000000005611224151530700217650ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isdigit. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return isdigit(c); } ntdb-1.0/lib/ccan/str/test/compile_fail-islower.c000066400000000000000000000005611224151530700220150ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check islower. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. 
*/ BUILD_ASSERT((char)255 < 0); #endif return islower(c); } ntdb-1.0/lib/ccan/str/test/compile_fail-isprint.c000066400000000000000000000005611224151530700220210ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isprint. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return isprint(c); } ntdb-1.0/lib/ccan/str/test/compile_fail-ispunct.c000066400000000000000000000005611224151530700220160ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check ispunct. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return ispunct(c); } ntdb-1.0/lib/ccan/str/test/compile_fail-isspace.c000066400000000000000000000005611224151530700217600ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isspace. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return isspace(c); } ntdb-1.0/lib/ccan/str/test/compile_fail-isupper.c000066400000000000000000000005611224151530700220200ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isupper. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. 
*/ BUILD_ASSERT((char)255 < 0); #endif return isupper(c); } ntdb-1.0/lib/ccan/str/test/compile_fail-isxdigit.c000066400000000000000000000005631224151530700221570ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isxdigit. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return isxdigit(c); } ntdb-1.0/lib/ccan/str/test/compile_fail-strchr.c000066400000000000000000000004211224151530700216310ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_TYPEOF #error We need typeof to check strstr. #endif #else const #endif char *ret; const char *str = "hello"; ret = strchr(str, 'l'); return ret ? 0 : 1; } ntdb-1.0/lib/ccan/str/test/compile_fail-strrchr.c000066400000000000000000000004221224151530700220140ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_TYPEOF #error We need typeof to check strstr. #endif #else const #endif char *ret; const char *str = "hello"; ret = strrchr(str, 'l'); return ret ? 0 : 1; } ntdb-1.0/lib/ccan/str/test/compile_fail-strstr.c000066400000000000000000000004241224151530700216700ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_TYPEOF #error We need typeof to check strstr. #endif #else const #endif char *ret; const char *str = "hello"; ret = strstr(str, "hell"); return ret ? 0 : 1; } ntdb-1.0/lib/ccan/str/test/debug.c000066400000000000000000000003241224151530700167730ustar00rootroot00000000000000/* We can't use the normal "#include the .c file" trick, since this is contaminated by str.h's macro overrides. So we put it in all tests like this. 
*/ #define CCAN_STR_DEBUG 1 #include ntdb-1.0/lib/ccan/str/test/run.c000066400000000000000000000051241224151530700165140ustar00rootroot00000000000000#include #include #include #include #include #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0])) static const char *substrings[] = { "far", "bar", "baz", "b", "ba", "z", "ar", NULL }; #define NUM_SUBSTRINGS (ARRAY_SIZE(substrings) - 1) static char *strdup_rev(const char *s) { char *ret = strdup(s); unsigned int i; for (i = 0; i < strlen(s); i++) ret[i] = s[strlen(s) - i - 1]; return ret; } int main(int argc, char *argv[]) { unsigned int i, j, n; char *strings[NUM_SUBSTRINGS * NUM_SUBSTRINGS]; n = 0; for (i = 0; i < NUM_SUBSTRINGS; i++) { for (j = 0; j < NUM_SUBSTRINGS; j++) { strings[n] = malloc(strlen(substrings[i]) + strlen(substrings[j]) + 1); sprintf(strings[n++], "%s%s", substrings[i], substrings[j]); } } plan_tests(n * n * 5 + 16); for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { unsigned int k, identical = 0; char *reva, *revb; /* Find first difference. */ for (k = 0; strings[i][k]==strings[j][k]; k++) { if (k == strlen(strings[i])) { identical = 1; break; } } if (identical) ok1(streq(strings[i], strings[j])); else ok1(!streq(strings[i], strings[j])); /* Postfix test should be equivalent to prefix * test on reversed string. 
*/ reva = strdup_rev(strings[i]); revb = strdup_rev(strings[j]); if (!strings[i][k]) { ok1(strstarts(strings[j], strings[i])); ok1(strends(revb, reva)); } else { ok1(!strstarts(strings[j], strings[i])); ok1(!strends(revb, reva)); } if (!strings[j][k]) { ok1(strstarts(strings[i], strings[j])); ok1(strends(reva, revb)); } else { ok1(!strstarts(strings[i], strings[j])); ok1(!strends(reva, revb)); } free(reva); free(revb); } } for (i = 0; i < n; i++) free(strings[i]); ok1(streq(stringify(NUM_SUBSTRINGS), "((sizeof(substrings) / sizeof(substrings[0])) - 1)")); ok1(streq(stringify(ARRAY_SIZE(substrings)), "(sizeof(substrings) / sizeof(substrings[0]))")); ok1(streq(stringify(i == 0), "i == 0")); ok1(strcount("aaaaaa", "b") == 0); ok1(strcount("aaaaaa", "a") == 6); ok1(strcount("aaaaaa", "aa") == 3); ok1(strcount("aaaaaa", "aaa") == 2); ok1(strcount("aaaaaa", "aaaa") == 1); ok1(strcount("aaaaaa", "aaaaa") == 1); ok1(strcount("aaaaaa", "aaaaaa") == 1); ok1(strcount("aaa aaa", "b") == 0); ok1(strcount("aaa aaa", "a") == 6); ok1(strcount("aaa aaa", "aa") == 2); ok1(strcount("aaa aaa", "aaa") == 2); ok1(strcount("aaa aaa", "aaaa") == 0); ok1(strcount("aaa aaa", "aaaaa") == 0); return exit_status(); } ntdb-1.0/lib/ccan/strset/000077500000000000000000000000001224151530700152775ustar00rootroot00000000000000ntdb-1.0/lib/ccan/strset/tools/000077500000000000000000000000001224151530700164375ustar00rootroot00000000000000ntdb-1.0/lib/ccan/strset/tools/Makefile000066400000000000000000000005451224151530700201030ustar00rootroot00000000000000CFLAGS=-Wall -Werror -O3 -I../../.. #CFLAGS=-Wall -Werror -g -I../../.. 
all: cbspeed speed cbspeed: cbspeed.o ../../talloc.o ../../str_talloc.o ../../grab_file.o ../../str.o ../../time.o ../../noerr.o speed: speed.o ../../talloc.o ../../str_talloc.o ../../grab_file.o ../../str.o ../../time.o ../../noerr.o clean: rm -f cbspeed speed speed.o cbspeed.o ntdb-1.0/lib/ccan/strset/tools/cbspeed.c000066400000000000000000000264601224151530700202200ustar00rootroot00000000000000/* Simple speed tests using original critbit code (modified not to allocate). * * Results on my 32 bit Intel(R) Core(TM) i5 CPU M 560 @ 2.67GHz, gcc 4.5.2: * Run 100 times: Min-Max(Avg) #01: Initial insert: 237-257(239) #02: Initial lookup (match): 180-197(181) #03: Initial lookup (miss): 171-190(172) #04: Initial lookup (random): 441-455(446) #05: Initial delete all: 127-148(128) #06: Initial re-inserting: 219-298(221) #07: Deleting first half: 101-109(102) #08: Adding (a different) half: 159-165(160) #09: Lookup after half-change (match): 203-216(204) #10: Lookup after half-change (miss): 217-225(218) #11: Churn 1: 298-311(300) #12: Churn 2: 298-318(301) #13: Churn 3: 301-322(304) #14: Post-Churn lookup (match): 189-196(190) #15: Post-Churn lookup (miss): 189-197(191) #16: Post-Churn lookup (random): 500-531(506) */ #include #include #include #include #include #include #include #include #include #include /* CRITBIT source */ typedef struct { void *root; } critbit0_tree; int critbit0_contains(critbit0_tree *t, const char *u); int critbit0_insert(critbit0_tree *t, const char *u); int critbit0_delete(critbit0_tree *t, const char *u); void critbit0_clear(critbit0_tree *t); int critbit0_allprefixed(critbit0_tree *t, const char *prefix, int (*handle) (const char *, void *), void *arg); #define uint8 uint8_t #define uint32 uint32_t static size_t allocated; /*2:*/ #include #include #include #include #include typedef struct{ void*child[2]; uint32 byte; uint8 otherbits; }critbit0_node; /*:2*//*3:*/ int critbit0_contains(critbit0_tree*t,const char*u){ const uint8*ubytes= (void*)u; 
const size_t ulen= strlen(u); uint8*p= t->root; /*4:*/ if(!p)return 0; /*:4*/ /*5:*/ while(1&(intptr_t)p){ critbit0_node*q= (void*)(p-1); /*6:*/ uint8 c= 0; if(q->bytebyte]; const int direction= (1+(q->otherbits|c))>>8; /*:6*/ p= q->child[direction]; } /*:5*/ /*7:*/ return 0==strcmp(u,(const char*)p); /*:7*/ } /*:3*//*8:*/ int critbit0_insert(critbit0_tree*t,const char*u) { const uint8*const ubytes= (void*)u; const size_t ulen= strlen(u); uint8*p= t->root; /*9:*/ if(!p){ #if 0 char*x; int a= posix_memalign((void**)&x,sizeof(void*),ulen+1); if(a)return 0; memcpy(x,u,ulen+1); t->root= x; #else t->root = (char *)u; #endif return 2; } /*:9*/ /*5:*/ while(1&(intptr_t)p){ critbit0_node*q= (void*)(p-1); /*6:*/ uint8 c= 0; if(q->bytebyte]; const int direction= (1+(q->otherbits|c))>>8; /*:6*/ p= q->child[direction]; } /*:5*/ /*10:*/ /*11:*/ uint32 newbyte; uint32 newotherbits; for(newbyte= 0;newbyte>8; /*:12*/ /*:10*/ /*13:*/ /*14:*/ critbit0_node*newnode; if(posix_memalign((void**)&newnode,sizeof(void*),sizeof(critbit0_node)))return 0; allocated++; char*x; #if 0 if(posix_memalign((void**)&x,sizeof(void*),ulen+1)){ free(newnode); return 0; } memcpy(x,ubytes,ulen+1); #else x = (char *)u; #endif newnode->byte= newbyte; newnode->otherbits= newotherbits; newnode->child[1-newdirection]= x; /*:14*/ /*15:*/ void**wherep= &t->root; for(;;){ uint8*p= *wherep; if(!(1&(intptr_t)p))break; critbit0_node*q= (void*)(p-1); if(q->byte> newbyte)break; if(q->byte==newbyte&&q->otherbits> newotherbits)break; uint8 c= 0; if(q->bytebyte]; const int direction= (1+(q->otherbits|c))>>8; wherep= q->child+direction; } newnode->child[newdirection]= *wherep; *wherep= (void*)(1+(char*)newnode); /*:15*/ /*:13*/ return 2; } /*:8*//*16:*/ int critbit0_delete(critbit0_tree*t,const char*u){ const uint8*ubytes= (void*)u; const size_t ulen= strlen(u); uint8*p= t->root; void**wherep= &t->root; void**whereq= 0; critbit0_node*q= 0; int direction= 0; /*17:*/ if(!p)return 0; /*:17*/ /*18:*/ while(1&(intptr_t)p){ 
whereq= wherep; q= (void*)(p-1); uint8 c= 0; if(q->bytebyte]; direction= (1+(q->otherbits|c))>>8; wherep= q->child+direction; p= *wherep; } /*:18*/ /*19:*/ if(0!=strcmp(u,(const char*)p))return 0; #if 0 free(p); #endif /*:19*/ /*20:*/ if(!whereq){ t->root= 0; return 1; } *whereq= q->child[1-direction]; free(q); allocated--; /*:20*/ return 1; } /*:16*//*21:*/ static void traverse(void*top){ /*22:*/ uint8*p= top; if(1&(intptr_t)p){ critbit0_node*q= (void*)(p-1); traverse(q->child[0]); traverse(q->child[1]); free(q); allocated--; }else{ #if 0 free(p); #endif } /*:22*/ } void critbit0_clear(critbit0_tree*t) { if(t->root)traverse(t->root); t->root= NULL; } /*:21*//*23:*/ static int allprefixed_traverse(uint8*top, int(*handle)(const char*,void*),void*arg){ /*26:*/ if(1&(intptr_t)top){ critbit0_node*q= (void*)(top-1); int direction; for(direction= 0;direction<2;++direction) switch(allprefixed_traverse(q->child[direction],handle,arg)){ case 1:break; case 0:return 0; default:return-1; } return 1; } /*:26*/ /*27:*/ return handle((const char*)top,arg);/*:27*/ } int critbit0_allprefixed(critbit0_tree*t,const char*prefix, int(*handle)(const char*,void*),void*arg){ const uint8*ubytes= (void*)prefix; const size_t ulen= strlen(prefix); uint8*p= t->root; uint8*top= p; size_t i; if(!p)return 1; /*24:*/ while(1&(intptr_t)p){ critbit0_node*q= (void*)(p-1); uint8 c= 0; if(q->bytebyte]; const int direction= (1+(q->otherbits|c))>>8; p= q->child[direction]; if(q->byte #include #include #include #include #include #include #include #include #include #include /* Nanoseconds per operation */ static size_t normalize(const struct timeval *start, const struct timeval *stop, unsigned int num) { struct timeval diff; timersub(stop, start, &diff); /* Floating point is more accurate here. 
*/ return (double)(diff.tv_sec * 1000000 + diff.tv_usec) / num * 1000; } int main(int argc, char *argv[]) { size_t i, j, num; struct timeval start, stop; struct strset set; char **words, **misswords; words = strsplit(NULL, grab_file(NULL, argv[1] ? argv[1] : "/usr/share/dict/words", NULL), "\n"); strset_init(&set); num = talloc_array_length(words) - 1; printf("%zu words\n", num); /* Append and prepend last char for miss testing. */ misswords = talloc_array(words, char *, num); for (i = 0; i < num; i++) { char lastc; if (strlen(words[i])) lastc = words[i][strlen(words[i])-1]; else lastc = 'z'; misswords[i] = talloc_asprintf(misswords, "%c%s%c%c", lastc, words[i], lastc, lastc); } printf("#01: Initial insert: "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) strset_set(&set, words[i]); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); #if 0 printf("Nodes allocated: %zu (%zu bytes)\n", allocated, allocated * sizeof(critbit0_node)); #endif printf("#02: Initial lookup (match): "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) if (!strset_test(&set, words[i])) abort(); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#03: Initial lookup (miss): "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) { if (strset_test(&set, misswords[i])) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); /* Lookups in order are very cache-friendly for judy; try random */ printf("#04: Initial lookup (random): "); fflush(stdout); start = time_now(); for (i = 0, j = 0; i < num; i++, j = (j + 10007) % num) if (!strset_test(&set, words[j])) abort(); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#05: Initial delete all: "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) if (!strset_clear(&set, words[i])) abort(); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#06: Initial re-inserting: "); 
fflush(stdout); start = time_now(); for (i = 0; i < num; i++) strset_set(&set, words[i]); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#07: Deleting first half: "); fflush(stdout); start = time_now(); for (i = 0; i < num; i+=2) if (!strset_clear(&set, words[i])) abort(); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#08: Adding (a different) half: "); fflush(stdout); start = time_now(); for (i = 0; i < num; i+=2) strset_set(&set, misswords[i]); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#09: Lookup after half-change (match): "); fflush(stdout); start = time_now(); for (i = 1; i < num; i+=2) if (!strset_test(&set, words[i])) abort(); for (i = 0; i < num; i+=2) { if (!strset_test(&set, misswords[i])) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#10: Lookup after half-change (miss): "); fflush(stdout); start = time_now(); for (i = 0; i < num; i+=2) if (strset_test(&set, words[i])) abort(); for (i = 1; i < num; i+=2) { if (strset_test(&set, misswords[i])) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); /* Hashtables with delete markers can fill with markers over time. * so do some changes to see how it operates in long-term. 
*/ printf("#11: Churn 1: "); start = time_now(); for (j = 0; j < num; j+=2) { if (!strset_clear(&set, misswords[j])) abort(); if (!strset_set(&set, words[j])) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#12: Churn 2: "); start = time_now(); for (j = 1; j < num; j+=2) { if (!strset_clear(&set, words[j])) abort(); if (!strset_set(&set, misswords[j])) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#13: Churn 3: "); start = time_now(); for (j = 1; j < num; j+=2) { if (!strset_clear(&set, misswords[j])) abort(); if (!strset_set(&set, words[j])) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); /* Now it's back to normal... */ printf("#14: Post-Churn lookup (match): "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) if (!strset_test(&set, words[i])) abort(); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); printf("#15: Post-Churn lookup (miss): "); fflush(stdout); start = time_now(); for (i = 0; i < num; i++) { if (strset_test(&set, misswords[i])) abort(); } stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); /* Lookups in order are very cache-friendly for judy; try random */ printf("#16: Post-Churn lookup (random): "); fflush(stdout); start = time_now(); for (i = 0, j = 0; i < num; i++, j = (j + 10007) % num) if (!strset_test(&set, words[j])) abort(); stop = time_now(); printf(" %zu ns\n", normalize(&start, &stop, num)); return 0; } ntdb-1.0/lib/ccan/tally/000077500000000000000000000000001224151530700151005ustar00rootroot00000000000000ntdb-1.0/lib/ccan/tally/LICENSE000066400000000000000000000167251224151530700161200ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 
This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. 
If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. 
c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. 
You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. 
ntdb-1.0/lib/ccan/tally/_info000066400000000000000000000024421224151530700161170ustar00rootroot00000000000000#include #include #include "config.h" /** * tally - running tally of integers * * The tally module implements simple analysis of a stream of integers. * Numbers are fed in via tally_add(), and then the mean, median, mode and * a histogram can be read out. * * Example: * #include * #include * #include * * int main(int argc, char *argv[]) * { * struct tally *t; * unsigned int i; * size_t err; * ssize_t val; * char *histogram; * * if (argc < 2) * errx(1, "Usage: %s ...\n", argv[0]); * * t = tally_new(100); * for (i = 1; i < argc; i++) * tally_add(t, atol(argv[i])); * * printf("Mean = %zi\n", tally_mean(t)); * val = tally_approx_median(t, &err); * printf("Median = %zi (+/- %zu)\n", val, err); * val = tally_approx_mode(t, &err); * printf("Mode = %zi (+/- %zu)\n", val, err); * histogram = tally_histogram(t, 50, 10); * printf("Histogram:\n%s", histogram); * free(histogram); * return 0; * } * * License: LGPL (v3 or any later version) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/build_assert\n"); printf("ccan/likely\n"); return 0; } return 1; } ntdb-1.0/lib/ccan/tally/tally.c000066400000000000000000000302651224151530700163770ustar00rootroot00000000000000/* Licensed under LGPLv3+ - see LICENSE file for details */ #include #include #include #include #include #include #include #include #include #define SIZET_BITS (sizeof(size_t)*CHAR_BIT) /* We use power of 2 steps. I tried being tricky, but it got buggy. */ struct tally { ssize_t min, max; size_t total[2]; /* This allows limited frequency analysis. */ unsigned buckets, step_bits; size_t counts[1 /* Actually: [buckets] */ ]; }; struct tally *tally_new(unsigned buckets) { struct tally *tally; /* There is always 1 bucket. */ if (buckets == 0) { buckets = 1; } /* Overly cautious check for overflow. 
*/ if (sizeof(*tally) * buckets / sizeof(*tally) != buckets) { return NULL; } tally = (struct tally *)malloc( sizeof(*tally) + sizeof(tally->counts[0])*(buckets-1)); if (tally == NULL) { return NULL; } tally->max = ((size_t)1 << (SIZET_BITS - 1)); tally->min = ~tally->max; tally->total[0] = tally->total[1] = 0; tally->buckets = buckets; tally->step_bits = 0; memset(tally->counts, 0, sizeof(tally->counts[0])*buckets); return tally; } static unsigned bucket_of(ssize_t min, unsigned step_bits, ssize_t val) { /* Don't over-shift. */ if (step_bits == SIZET_BITS) { return 0; } assert(step_bits < SIZET_BITS); return (size_t)(val - min) >> step_bits; } /* Return the min value in bucket b. */ static ssize_t bucket_min(ssize_t min, unsigned step_bits, unsigned b) { /* Don't over-shift. */ if (step_bits == SIZET_BITS) { return min; } assert(step_bits < SIZET_BITS); return min + ((ssize_t)b << step_bits); } /* Does shifting by this many bits truncate the number? */ static bool shift_overflows(size_t num, unsigned bits) { if (bits == 0) { return false; } return ((num << bits) >> 1) != (num << (bits - 1)); } /* When min or max change, we may need to shuffle the frequency counts. */ static void renormalize(struct tally *tally, ssize_t new_min, ssize_t new_max) { size_t range, spill; unsigned int i, old_min; /* Uninitialized? Don't do anything... */ if (tally->max < tally->min) { goto update; } /* If we don't have sufficient range, increase step bits until * buckets cover entire range of ssize_t anyway. */ range = (new_max - new_min) + 1; while (!shift_overflows(tally->buckets, tally->step_bits) && range > ((size_t)tally->buckets << tally->step_bits)) { /* Collapse down. */ for (i = 1; i < tally->buckets; i++) { tally->counts[i/2] += tally->counts[i]; tally->counts[i] = 0; } tally->step_bits++; } /* Now if minimum has dropped, move buckets up. 
*/ old_min = bucket_of(new_min, tally->step_bits, tally->min); memmove(tally->counts + old_min, tally->counts, sizeof(tally->counts[0]) * (tally->buckets - old_min)); memset(tally->counts, 0, sizeof(tally->counts[0]) * old_min); /* If we moved boundaries, adjust buckets to that ratio. */ spill = (tally->min - new_min) % (1 << tally->step_bits); for (i = 0; i < tally->buckets-1; i++) { size_t adjust = (tally->counts[i] >> tally->step_bits) * spill; tally->counts[i] -= adjust; tally->counts[i+1] += adjust; } update: tally->min = new_min; tally->max = new_max; } void tally_add(struct tally *tally, ssize_t val) { ssize_t new_min = tally->min, new_max = tally->max; bool need_renormalize = false; if (val < tally->min) { new_min = val; need_renormalize = true; } if (val > tally->max) { new_max = val; need_renormalize = true; } if (need_renormalize) { renormalize(tally, new_min, new_max); } /* 128-bit arithmetic! If we didn't want exact mean, we could just * pull it out of counts. */ if (val > 0 && tally->total[0] + val < tally->total[0]) { tally->total[1]++; } else if (val < 0 && tally->total[0] + val > tally->total[0]) { tally->total[1]--; } tally->total[0] += val; tally->counts[bucket_of(tally->min, tally->step_bits, val)]++; } size_t tally_num(const struct tally *tally) { size_t i, num = 0; for (i = 0; i < tally->buckets; i++) { num += tally->counts[i]; } return num; } ssize_t tally_min(const struct tally *tally) { return tally->min; } ssize_t tally_max(const struct tally *tally) { return tally->max; } /* FIXME: Own ccan module please! */ static unsigned fls64(uint64_t val) { #if HAVE_BUILTIN_CLZL if (val <= ULONG_MAX) { /* This is significantly faster! */ return val ? 
sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0; } else { #endif uint64_t r = 64; if (!val) { return 0; } if (!(val & 0xffffffff00000000ull)) { val <<= 32; r -= 32; } if (!(val & 0xffff000000000000ull)) { val <<= 16; r -= 16; } if (!(val & 0xff00000000000000ull)) { val <<= 8; r -= 8; } if (!(val & 0xf000000000000000ull)) { val <<= 4; r -= 4; } if (!(val & 0xc000000000000000ull)) { val <<= 2; r -= 2; } if (!(val & 0x8000000000000000ull)) { val <<= 1; r -= 1; } return r; #if HAVE_BUILTIN_CLZL } #endif } /* This is stolen straight from Hacker's Delight. */ static uint64_t divlu64(uint64_t u1, uint64_t u0, uint64_t v) { const uint64_t b = 4294967296ULL; /* Number base (32 bits). */ uint32_t un[4], /* Dividend and divisor */ vn[2]; /* normalized and broken */ /* up into halfwords. */ uint32_t q[2]; /* Quotient as halfwords. */ uint64_t un1, un0, /* Dividend and divisor */ vn0; /* as fullwords. */ uint64_t qhat; /* Estimated quotient digit. */ uint64_t rhat; /* A remainder. */ uint64_t p; /* Product of two digits. */ int64_t s, i, j, t, k; if (u1 >= v) { /* If overflow, return the largest */ return (uint64_t)-1; /* possible quotient. */ } s = 64 - fls64(v); /* 0 <= s <= 63. */ vn0 = v << s; /* Normalize divisor. */ vn[1] = vn0 >> 32; /* Break divisor up into */ vn[0] = vn0 & 0xFFFFFFFF; /* two 32-bit halves. */ // Shift dividend left. un1 = ((u1 << s) | (u0 >> (64 - s))) & (-s >> 63); un0 = u0 << s; un[3] = un1 >> 32; /* Break dividend up into */ un[2] = un1; /* four 32-bit halfwords */ un[1] = un0 >> 32; /* Note: storing into */ un[0] = un0; /* halfwords truncates. */ for (j = 1; j >= 0; j--) { /* Compute estimate qhat of q[j]. */ qhat = (un[j+2]*b + un[j+1])/vn[1]; rhat = (un[j+2]*b + un[j+1]) - qhat*vn[1]; again: if (qhat >= b || qhat*vn[0] > b*rhat + un[j]) { qhat = qhat - 1; rhat = rhat + vn[1]; if (rhat < b) { goto again; } } /* Multiply and subtract. 
*/ k = 0; for (i = 0; i < 2; i++) { p = qhat*vn[i]; t = un[i+j] - k - (p & 0xFFFFFFFF); un[i+j] = t; k = (p >> 32) - (t >> 32); } t = un[j+2] - k; un[j+2] = t; q[j] = qhat; /* Store quotient digit. */ if (t < 0) { /* If we subtracted too */ q[j] = q[j] - 1; /* much, add back. */ k = 0; for (i = 0; i < 2; i++) { t = un[i+j] + vn[i] + k; un[i+j] = t; k = t >> 32; } un[j+2] = un[j+2] + k; } } /* End j. */ return q[1]*b + q[0]; } static int64_t divls64(int64_t u1, uint64_t u0, int64_t v) { int64_t q, uneg, vneg, diff, borrow; uneg = u1 >> 63; /* -1 if u < 0. */ if (uneg) { /* Compute the absolute */ u0 = -u0; /* value of the dividend u. */ borrow = (u0 != 0); u1 = -u1 - borrow; } vneg = v >> 63; /* -1 if v < 0. */ v = (v ^ vneg) - vneg; /* Absolute value of v. */ if ((uint64_t)u1 >= (uint64_t)v) { goto overflow; } q = divlu64(u1, u0, v); diff = uneg ^ vneg; /* Negate q if signs of */ q = (q ^ diff) - diff; /* u and v differed. */ if ((diff ^ q) < 0 && q != 0) { /* If overflow, return the largest */ overflow: /* possible neg. quotient. */ q = 0x8000000000000000ULL; } return q; } ssize_t tally_mean(const struct tally *tally) { size_t count = tally_num(tally); if (!count) { return 0; } if (sizeof(tally->total[0]) == sizeof(uint32_t)) { /* Use standard 64-bit arithmetic. */ int64_t total = tally->total[0] | (((uint64_t)tally->total[1]) << 32); return total / count; } return divls64(tally->total[1], tally->total[0], count); } ssize_t tally_total(const struct tally *tally, ssize_t *overflow) { if (overflow) { *overflow = tally->total[1]; return tally->total[0]; } /* If result is negative, make sure we can represent it. */ if (tally->total[1] & ((size_t)1 << (SIZET_BITS-1))) { /* Must have only underflowed once, and must be able to * represent result at ssize_t. */ if ((~tally->total[1])+1 != 0 || (ssize_t)tally->total[0] >= 0) { /* Underflow, return minimum. 
*/ return (ssize_t)((size_t)1 << (SIZET_BITS - 1)); } } else { /* Result is positive, must not have overflowed, and must be * able to represent as ssize_t. */ if (tally->total[1] || (ssize_t)tally->total[0] < 0) { /* Overflow. Return maximum. */ return (ssize_t)~((size_t)1 << (SIZET_BITS - 1)); } } return tally->total[0]; } static ssize_t bucket_range(const struct tally *tally, unsigned b, size_t *err) { ssize_t min, max; min = bucket_min(tally->min, tally->step_bits, b); if (b == tally->buckets - 1) { max = tally->max; } else { max = bucket_min(tally->min, tally->step_bits, b+1) - 1; } /* FIXME: Think harder about cumulative error; is this enough?. */ *err = (max - min + 1) / 2; /* Avoid overflow. */ return min + (max - min) / 2; } ssize_t tally_approx_median(const struct tally *tally, size_t *err) { size_t count = tally_num(tally), total = 0; unsigned int i; for (i = 0; i < tally->buckets; i++) { total += tally->counts[i]; if (total * 2 >= count) { break; } } return bucket_range(tally, i, err); } ssize_t tally_approx_mode(const struct tally *tally, size_t *err) { unsigned int i, min_best = 0, max_best = 0; for (i = 0; i < tally->buckets; i++) { if (tally->counts[i] > tally->counts[min_best]) { min_best = max_best = i; } else if (tally->counts[i] == tally->counts[min_best]) { max_best = i; } } /* We can have more than one best, making our error huge. 
*/ if (min_best != max_best) { ssize_t min, max; min = bucket_range(tally, min_best, err); max = bucket_range(tally, max_best, err); max += *err; *err += (size_t)(max - min); return min + (max - min) / 2; } return bucket_range(tally, min_best, err); } static unsigned get_max_bucket(const struct tally *tally) { unsigned int i; for (i = tally->buckets; i > 0; i--) { if (tally->counts[i-1]) { break; } } return i; } char *tally_histogram(const struct tally *tally, unsigned width, unsigned height) { unsigned int i, count, max_bucket, largest_bucket; struct tally *tmp; char *graph, *p; assert(width >= TALLY_MIN_HISTO_WIDTH); assert(height >= TALLY_MIN_HISTO_HEIGHT); /* Ignore unused buckets. */ max_bucket = get_max_bucket(tally); /* FIXME: It'd be nice to smooth here... */ if (height >= max_bucket) { height = max_bucket; tmp = NULL; } else { /* We create a temporary then renormalize so < height. */ /* FIXME: Antialias properly! */ tmp = tally_new(tally->buckets); if (!tmp) { return NULL; } tmp->min = tally->min; tmp->max = tally->max; tmp->step_bits = tally->step_bits; memcpy(tmp->counts, tally->counts, sizeof(tally->counts[0]) * tmp->buckets); while ((max_bucket = get_max_bucket(tmp)) >= height) { renormalize(tmp, tmp->min, tmp->max * 2); } /* Restore max */ tmp->max = tally->max; tally = tmp; height = max_bucket; } /* Figure out longest line, for scale. */ largest_bucket = 0; for (i = 0; i < tally->buckets; i++) { if (tally->counts[i] > largest_bucket) { largest_bucket = tally->counts[i]; } } p = graph = (char *)malloc(height * (width + 1) + 1); if (!graph) { free(tmp); return NULL; } for (i = 0; i < height; i++) { unsigned covered = 1, row; /* People expect minimum at the bottom. 
*/ row = height - i - 1; count = (double)tally->counts[row] / largest_bucket * (width-1)+1; if (row == 0) { covered = snprintf(p, width, "%zi", tally->min); } else if (row == height - 1) { covered = snprintf(p, width, "%zi", tally->max); } else if (row == bucket_of(tally->min, tally->step_bits, 0)) { *p = '+'; } else { *p = '|'; } if (covered > width) { covered = width; } p += covered; if (count > covered) { count -= covered; memset(p, '*', count); } else { count = 0; } p += count; *p = '\n'; p++; } *p = '\0'; free(tmp); return graph; } ntdb-1.0/lib/ccan/tally/tally.h000066400000000000000000000064111224151530700164000ustar00rootroot00000000000000/* Licensed under LGPLv3+ - see LICENSE file for details */ #ifndef CCAN_TALLY_H #define CCAN_TALLY_H #include "config.h" #include struct tally; /** * tally_new - allocate the tally structure. * @buckets: the number of frequency buckets. * * This allocates a tally structure using malloc(). The greater the value * of @buckets, the more accurate tally_approx_median() and tally_approx_mode() * and tally_histogram() will be, but more memory is consumed. If you want * to use tally_histogram(), the optimal bucket value is the same as that * @height argument. */ struct tally *tally_new(unsigned int buckets); /** * tally_add - add a value. * @tally: the tally structure. * @val: the value to add. */ void tally_add(struct tally *tally, ssize_t val); /** * tally_num - how many times as tally_add been called? * @tally: the tally structure. */ size_t tally_num(const struct tally *tally); /** * tally_min - the minimum value passed to tally_add. * @tally: the tally structure. * * Undefined if tally_num() == 0. */ ssize_t tally_min(const struct tally *tally); /** * tally_max - the maximum value passed to tally_add. * @tally: the tally structure. * * Undefined if tally_num() == 0. */ ssize_t tally_max(const struct tally *tally); /** * tally_mean - the mean value passed to tally_add. * @tally: the tally structure. 
* * Undefined if tally_num() == 0, but will not crash. */ ssize_t tally_mean(const struct tally *tally); /** * tally_total - the total value passed to tally_add. * @tally: the tally structure. * @overflow: the overflow value (or NULL). * * If your total can't overflow a ssize_t, you don't need @overflow. * Otherwise, @overflow is the upper ssize_t, and the return value should * be treated as the lower size_t (ie. the sign bit is in @overflow). */ ssize_t tally_total(const struct tally *tally, ssize_t *overflow); /** * tally_approx_median - the approximate median value passed to tally_add. * @tally: the tally structure. * @err: the error in the returned value (ie. real median is +/- @err). * * Undefined if tally_num() == 0, but will not crash. Because we * don't reallocate, we don't store all values, so this median cannot be * exact. */ ssize_t tally_approx_median(const struct tally *tally, size_t *err); /** * tally_approx_mode - the approximate mode value passed to tally_add. * @tally: the tally structure. * @err: the error in the returned value (ie. real mode is +/- @err). * * Undefined if tally_num() == 0, but will not crash. Because we * don't reallocate, we don't store all values, so this mode cannot be * exact. It could well be a value which was never passed to tally_add! */ ssize_t tally_approx_mode(const struct tally *tally, size_t *err); #define TALLY_MIN_HISTO_WIDTH 8 #define TALLY_MIN_HISTO_HEIGHT 3 /** * tally_graph - return an ASCII image of the tally_add distribution * @tally: the tally structure. * @width: the maximum string width to use (>= TALLY_MIN_HISTO_WIDTH) * @height: the maximum string height to use (>= TALLY_MIN_HISTO_HEIGHT) * * Returns a malloc()ed string which draws a multi-line graph of the * distribution of values. On out of memory returns NULL. 
*/ char *tally_histogram(const struct tally *tally, unsigned width, unsigned height); #endif /* CCAN_TALLY_H */ ntdb-1.0/lib/ccan/tally/test/000077500000000000000000000000001224151530700160575ustar00rootroot00000000000000ntdb-1.0/lib/ccan/tally/test/run-bucket_of.c000066400000000000000000000033401224151530700207660ustar00rootroot00000000000000#include #include int main(void) { unsigned int i, max_step; ssize_t min, max; max = (ssize_t)~(1ULL << (sizeof(max)*CHAR_BIT - 1)); min = (ssize_t)(1ULL << (sizeof(max)*CHAR_BIT - 1)); max_step = sizeof(max)*CHAR_BIT; plan_tests(2 + 100 + 10 + 5 + 2 + 100 + 5 + 4 + (1 << 7) * (max_step - 7)); /* Single step, single bucket == easy. */ ok1(bucket_of(0, 0, 0) == 0); /* Double step, still in first bucket. */ ok1(bucket_of(0, 1, 0) == 0); /* Step 8. */ for (i = 0; i < 100; i++) ok1(bucket_of(0, 3, i) == i >> 3); /* 10 values in 5 buckets, step 2. */ for (i = 0; i < 10; i++) ok1(bucket_of(0, 1, i) == i >> 1); /* Extreme cases. */ ok1(bucket_of(min, 0, min) == 0); ok1(bucket_of(min, max_step-1, min) == 0); ok1(bucket_of(min, max_step-1, max) == 1); ok1(bucket_of(min, max_step, min) == 0); ok1(bucket_of(min, max_step, max) == 0); /* Now, bucket_min() should match: */ ok1(bucket_min(0, 0, 0) == 0); /* Double step, val in first bucket still 0. */ ok1(bucket_min(0, 1, 0) == 0); /* Step 8. */ for (i = 0; i < 100; i++) ok1(bucket_min(0, 3, i) == i << 3); /* 10 values in 5 buckets, step 2. */ for (i = 0; i < 5; i++) ok1(bucket_min(0, 1, i) == i << 1); /* Extreme cases. */ ok1(bucket_min(min, 0, 0) == min); ok1(bucket_min(min, max_step-1, 0) == min); ok1(bucket_min(min, max_step-1, 1) == 0); ok1(bucket_min(min, max_step, 0) == min); /* Now, vary step and number of buckets, but bucket_min and bucket_of * must agree. 
*/ for (i = 0; i < (1 << 7); i++) { unsigned int j; for (j = 0; j < max_step - 7; j++) { ssize_t val; val = bucket_min(-(ssize_t)i, j, i); ok1(bucket_of(-(ssize_t)i, j, val) == i); } } return exit_status(); } ntdb-1.0/lib/ccan/tally/test/run-divlu64.c000066400000000000000000000007601224151530700203250ustar00rootroot00000000000000#include #include int main(void) { unsigned int i, j; plan_tests(5985); /* Simple tests. */ for (i = 0; i < 127; i++) { uint64_t u1, u0; if (i < 64) { u1 = 0; u0 = 1ULL << i; j = 0; } else { u1 = 1ULL << (i - 64); u0 = 0; j = i - 63; } for (; j < 63; j++) { uint64_t answer; if (j > i) answer = 0; else answer = 1ULL << (i - j); ok1(divlu64(u1, u0, 1ULL << j) == answer); } } return exit_status(); } ntdb-1.0/lib/ccan/tally/test/run-histogram.c000066400000000000000000000042301224151530700210210ustar00rootroot00000000000000#include #include int main(void) { int i; struct tally *tally; char *graph, *p; plan_tests(100 + 1 + 10 + 1 + 100 + 1 + 10 + 1 + 10 * 2 + 1); /* Uniform distribution, easy. */ tally = tally_new(100); for (i = 0; i < 100; i++) tally_add(tally, i); /* 1:1 height. */ graph = p = tally_histogram(tally, 20, 100); for (i = 0; i < 100; i++) { char *eol = strchr(p, '\n'); /* We expect it filled all way to the end. */ ok1(eol - p == 20); p = eol + 1; } ok1(!*p); free(graph); /* Reduced height. */ graph = p = tally_histogram(tally, 20, 10); for (i = 0; i < 10; i++) { char *eol = strchr(p, '\n'); /* First once can be truncated (bucket aliasing) */ if (eol) { ok1(eol - p == 20 || (eol - p < 20 && i == 0)); } else /* We should, at worst, half-fill graph */ ok1(i > 5); if (eol) p = eol + 1; } ok1(!*p); free(graph); /* Enlarged height (gets capped). */ graph = p = tally_histogram(tally, 20, 1000); for (i = 0; i < 100; i++) { char *eol = strchr(p, '\n'); /* We expect it filled all way to the end. */ ok1(eol - p == 20); p = eol + 1; } ok1(!*p); free(graph); free(tally); /* Distinctive increasing pattern. 
*/ tally = tally_new(10); for (i = 0; i < 10; i++) { unsigned int j; for (j = 0; j <= i; j++) tally_add(tally, i); } graph = p = tally_histogram(tally, 10, 10); for (i = 0; i < 10; i++) { char *eol = strchr(p, '\n'); ok1(eol - p == 10 - i); p = eol + 1; } ok1(!*p); diag("Here's the pretty: %s", graph); free(graph); free(tally); /* With negative values. */ tally = tally_new(10); for (i = 0; i < 10; i++) { tally_add(tally, i - 5); } graph = p = tally_histogram(tally, 10, 10); for (i = 0; i < 10; i++) { char *eol = strchr(p, '\n'); /* We expect it filled all way to the end. */ ok1(eol - p == 10); /* Check min/max labels. */ if (i == 0) ok1(strncmp(p, "4*", 2) == 0); else if (i == 9) ok1(strncmp(p, "-5*", 3) == 0); else if (i == 4) ok1(p[0] == '+'); /* 0 marker */ else ok1(p[0] == '|'); p = eol + 1; } ok1(!*p); diag("Here's the pretty: %s", graph); free(graph); free(tally); return exit_status(); } ntdb-1.0/lib/ccan/tally/test/run-mean.c000066400000000000000000000011651224151530700177500ustar00rootroot00000000000000#include #include int main(void) { int i; struct tally *tally = tally_new(0); ssize_t min, max; max = (ssize_t)~(1ULL << (sizeof(max)*CHAR_BIT - 1)); min = (ssize_t)(1ULL << (sizeof(max)*CHAR_BIT - 1)); plan_tests(100 + 100); /* Simple mean test: should always be 0. */ for (i = 0; i < 100; i++) { tally_add(tally, i); tally_add(tally, -i); ok1(tally_mean(tally) == 0); } /* Works for big values too... 
*/ for (i = 0; i < 100; i++) { tally_add(tally, max - i); tally_add(tally, min + 1 + i); ok1(tally_mean(tally) == 0); } free(tally); return exit_status(); } ntdb-1.0/lib/ccan/tally/test/run-median.c000066400000000000000000000022131224151530700202600ustar00rootroot00000000000000#include #include int main(void) { int i; struct tally *tally = tally_new(100); ssize_t min, max, median; size_t err; max = (ssize_t)~(1ULL << (sizeof(max)*CHAR_BIT - 1)); min = (ssize_t)(1ULL << (sizeof(max)*CHAR_BIT - 1)); plan_tests(100*2 + 100*2 + 100*2); /* Simple median test: should always be around 0. */ for (i = 0; i < 100; i++) { tally_add(tally, i); tally_add(tally, -i); median = tally_approx_median(tally, &err); ok1(err <= 4); ok1(median - (ssize_t)err <= 0 && median + (ssize_t)err >= 0); } /* Works for big values too... */ for (i = 0; i < 100; i++) { tally_add(tally, max - i); tally_add(tally, min + 1 + i); median = tally_approx_median(tally, &err); /* Error should be < 100th of max - min. */ ok1(err <= max / 100 * 2); ok1(median - (ssize_t)err <= 0 && median + (ssize_t)err >= 0); } free(tally); tally = tally_new(10); for (i = 0; i < 100; i++) { tally_add(tally, i); median = tally_approx_median(tally, &err); ok1(err <= i / 10 + 1); ok1(median - (ssize_t)err <= i/2 && median + (ssize_t)err >= i/2); } free(tally); return exit_status(); } ntdb-1.0/lib/ccan/tally/test/run-min-max.c000066400000000000000000000006471224151530700204020ustar00rootroot00000000000000#include #include int main(void) { int i; struct tally *tally = tally_new(0); plan_tests(100 * 4); /* Test max, min and num. 
*/ for (i = 0; i < 100; i++) { tally_add(tally, i); ok1(tally_num(tally) == i*2 + 1); tally_add(tally, -i); ok1(tally_num(tally) == i*2 + 2); ok1(tally_max(tally) == i); ok1(tally_min(tally) == -i); } free(tally); return exit_status(); } ntdb-1.0/lib/ccan/tally/test/run-mode.c000066400000000000000000000021351224151530700177520ustar00rootroot00000000000000#include #include int main(void) { int i; struct tally *tally = tally_new(100); ssize_t min, max, mode; size_t err; max = (ssize_t)~(1ULL << (sizeof(max)*CHAR_BIT - 1)); min = (ssize_t)(1ULL << (sizeof(max)*CHAR_BIT - 1)); plan_tests(100 + 50 + 100 + 100 + 10); /* Simple mode test: should always be around 0 (we add that twice). */ for (i = 0; i < 100; i++) { tally_add(tally, i); tally_add(tally, -i); mode = tally_approx_mode(tally, &err); if (i < 50) ok1(err == 0); ok1(mode - (ssize_t)err <= 0 && mode + (ssize_t)err >= 0); } /* Works for big values too... */ for (i = 0; i < 100; i++) { tally_add(tally, max - i); tally_add(tally, min + 1 + i); mode = tally_approx_mode(tally, &err); ok1(mode - (ssize_t)err <= 0 && mode + (ssize_t)err >= 0); } free(tally); tally = tally_new(10); tally_add(tally, 0); for (i = 0; i < 100; i++) { tally_add(tally, i); mode = tally_approx_mode(tally, &err); if (i < 10) ok1(err == 0); ok1(mode - (ssize_t)err <= 0 && mode + (ssize_t)err >= 0); } free(tally); return exit_status(); } ntdb-1.0/lib/ccan/tally/test/run-renormalize.c000066400000000000000000000010111224151530700213450ustar00rootroot00000000000000#include #include int main(void) { struct tally *tally = tally_new(2); plan_tests(4); tally->min = 0; tally->max = 0; tally->counts[0] = 1; /* This renormalize should do nothing. */ renormalize(tally, 0, 1); ok1(tally->counts[0] == 1); ok1(tally->counts[1] == 0); tally->counts[1]++; /* This renormalize should collapse both into bucket 0. 
*/ renormalize(tally, 0, 3); ok1(tally->counts[0] == 2); ok1(tally->counts[1] == 0); free(tally); return exit_status(); } ntdb-1.0/lib/ccan/tally/test/run-total.c000066400000000000000000000024631224151530700201550ustar00rootroot00000000000000#include #include int main(void) { struct tally *tally; ssize_t total, overflow; ssize_t min, max; max = (ssize_t)~(1ULL << (sizeof(max)*CHAR_BIT - 1)); min = (ssize_t)(1ULL << (sizeof(max)*CHAR_BIT - 1)); plan_tests(15); /* Simple case. */ tally = tally_new(0); tally_add(tally, min); ok1(tally_total(tally, NULL) == min); ok1(tally_total(tally, &overflow) == min); ok1(overflow == -1); /* Underflow. */ tally_add(tally, min); total = tally_total(tally, &overflow); ok1(overflow == -1); ok1((size_t)total == 0); ok1(tally_total(tally, NULL) == min); free(tally); /* Simple case. */ tally = tally_new(0); tally_add(tally, max); ok1(tally_total(tally, NULL) == max); ok1(tally_total(tally, &overflow) == max); ok1(overflow == 0); /* Overflow into sign bit... */ tally_add(tally, max); total = tally_total(tally, &overflow); ok1(overflow == 0); ok1((size_t)total == (size_t)-2); ok1(tally_total(tally, NULL) == max); /* Overflow into upper size_t. */ tally_add(tally, max); total = tally_total(tally, &overflow); ok1(overflow == 1); if (sizeof(size_t) == 4) ok1((size_t)total == 0x7FFFFFFD); else if (sizeof(size_t) == 8) ok1((size_t)total == 0x7FFFFFFFFFFFFFFDULL); ok1(tally_total(tally, NULL) == max); free(tally); return exit_status(); } ntdb-1.0/lib/ccan/tcon/000077500000000000000000000000001224151530700147165ustar00rootroot00000000000000ntdb-1.0/lib/ccan/tcon/_info000066400000000000000000000037761224151530700157500ustar00rootroot00000000000000#include "config.h" #include /** * tcon - routines for creating typesafe generic containers * * This code lets users create a structure with a typecanary; your API * is then a set of macros which check the type canary before calling * the generic routines. 
* * Example: * #include * #include * * // A simple container class. Can only contain one thing though! * struct container { * void *contents; * }; * static inline void container_add_raw(struct container *c, void *p) * { * c->contents = p; * } * static inline void *container_get_raw(struct container *c) * { * return c->contents; * } * * // This lets the user define their container type; includes a * // "type canary" to check types against. * #define DEFINE_TYPED_CONTAINER_STRUCT(name, type) \ * struct name { struct container raw; TCON(type canary); } * * // These macros make sure the container type and pointer match. * #define container_add(c, p) \ * container_add_raw(&tcon_check((c), canary, (p))->raw, (p)) * #define container_get(c) \ * tcon_cast((c), canary, container_get_raw(&(c)->raw)) * * // Now, let's define two different containers. * DEFINE_TYPED_CONTAINER_STRUCT(int_container, int *); * DEFINE_TYPED_CONTAINER_STRUCT(string_container, char *); * * int main(int argc, char *argv[]) * { * struct int_container ic; * struct string_container sc; * * // We would get a warning if we used the wrong types... * container_add(&ic, &argc); * container_add(&sc, argv[argc-1]); * * printf("Last arg is %s of %i arguments\n", * container_get(&sc), *container_get(&ic) - 1); * return 0; * } * // Given "foo" outputs "Last arg is foo of 1 arguments" * // Given "foo bar" outputs "Last arg is bar of 2 arguments" * * License: Public domain * * Author: Rusty Russell */ int main(int argc, char *argv[]) { /* Expect exactly one argument */ if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { return 0; } return 1; } ntdb-1.0/lib/ccan/tcon/tcon.h000066400000000000000000000066651224151530700160470ustar00rootroot00000000000000/* Placed into the public domain */ #ifndef CCAN_TCON_H #define CCAN_TCON_H #include "config.h" /** * TCON - declare a _tcon type containing canary variables. * @decls: the semi-colon separated list of type canaries. 
* * This declares a _tcon member for a structure. It should be the * last element in your structure; with sufficient compiler support it * will not use any actual storage. tcon_check() will compare * expressions with one of these "type canaries" to cause warnings if * the container is misused. * * A type of "void *" will allow tcon_check() to pass on any (pointer) type. * * Example: * // Simply typesafe linked list. * struct list_head { * struct list_head *prev, *next; * }; * * struct string_list { * struct list_head raw; * TCON(char *canary); * }; * * // More complex: mapping from one type to another. * struct map { * void *contents; * }; * * struct int_to_string_map { * struct map raw; * TCON(char *charp_canary; int int_canary); * }; */ #if HAVE_FLEXIBLE_ARRAY_MEMBER #define TCON(decls) struct { decls; } _tcon[] #else #define TCON(decls) struct { decls; } _tcon[1] #endif /** * tcon_check - typecheck a typed container * @x: the structure containing the TCON. * @canary: which canary to check against. * @expr: the expression whose type must match the TCON (not evaluated) * * This macro is used to check that the expression is the type * expected for this structure (note the "useless" sizeof() argument * which contains this comparison with the type canary). * * It evaluates to @x so you can chain it. * * Example: * #define tlist_add(h, n, member) \ * list_add(&tcon_check((h), canary, (n))->raw, &(n)->member) */ #define tcon_check(x, canary, expr) \ (sizeof((x)->_tcon[0].canary == (expr)) ? (x) : (x)) /** * tcon_check_ptr - typecheck a typed container * @x: the structure containing the TCON. * @canary: which canary to check against. * @expr: the expression whose type must match &TCON (not evaluated) * * This macro is used to check that the expression is a pointer to the type * expected for this structure (note the "useless" sizeof() argument * which contains this comparison with the type canary), or NULL. * * It evaluates to @x so you can chain it. 
*/ #define tcon_check_ptr(x, canary, expr) \ (sizeof(&(x)->_tcon[0].canary == (expr)) ? (x) : (x)) /** * tcon_type - the type within a container (or void *) * @x: the structure containing the TCON. * @canary: which canary to check against. */ #if HAVE_TYPEOF #define tcon_type(x, canary) __typeof__((x)->_tcon[0].canary) #else #define tcon_type(x, canary) void * #endif /** * tcon_ptr_type - pointer to the type within a container (or void *) * @x: the structure containing the TCON. * @canary: which canary to check against. */ #if HAVE_TYPEOF #define tcon_ptr_type(x, canary) __typeof__(&(x)->_tcon[0].canary) #else #define tcon_ptr_type(x, canary) void * #endif /** * tcon_cast - cast to a canary type for this container (or void *) * @x: a structure containing the TCON. * @canary: which canary to cast to. * @expr: the value to cast * * This is used to cast to the correct type for this container. If the * platform doesn't HAVE_TYPEOF, then it casts to void * (which will * cause a warning if the user doesn't expect a pointer type). 
*/ #define tcon_cast(x, canary, expr) ((tcon_type((x), canary))(expr)) #define tcon_cast_ptr(x, canary, expr) ((tcon_ptr_type((x), canary))(expr)) #endif /* CCAN_TCON_H */ ntdb-1.0/lib/ccan/tcon/test/000077500000000000000000000000001224151530700156755ustar00rootroot00000000000000ntdb-1.0/lib/ccan/tcon/test/compile_fail-tcon_cast.c000066400000000000000000000006741224151530700224460ustar00rootroot00000000000000#include #include struct container { void *p; }; struct int_and_charp_container { struct container raw; TCON(int *tc1; char *tc2); }; int main(int argc, char *argv[]) { struct int_and_charp_container icon; #ifdef FAIL #if !HAVE_TYPEOF #error We cannot detect type problems without HAVE_TYPEOF #endif char * #else int * #endif x; icon.raw.p = NULL; x = tcon_cast(&icon, tc1, icon.raw.p); return 0; } ntdb-1.0/lib/ccan/tcon/test/compile_fail.c000066400000000000000000000005031224151530700204620ustar00rootroot00000000000000#include #include struct container { void *p; }; struct int_container { struct container raw; TCON(int *canary); }; int main(int argc, char *argv[]) { struct int_container icon; #ifdef FAIL char * #else int * #endif x = NULL; tcon_check(&icon, canary, x)->raw.p = x; return 0; } ntdb-1.0/lib/ccan/tcon/test/compile_ok-void.c000066400000000000000000000005731224151530700211260ustar00rootroot00000000000000#include #include struct container { void *p; }; struct void_container { struct container raw; TCON(void *canary); }; int main(int argc, char *argv[]) { struct void_container vcon; tcon_check(&vcon, canary, NULL)->raw.p = NULL; tcon_check(&vcon, canary, argv[0])->raw.p = NULL; tcon_check(&vcon, canary, main)->raw.p = NULL; return 0; } ntdb-1.0/lib/ccan/tcon/test/compile_ok.c000066400000000000000000000007511224151530700201650ustar00rootroot00000000000000#include #include struct container { void *p; }; struct int_container { struct container raw; TCON(int tc); }; struct charp_and_int_container { struct container raw; TCON(int tc1; char *tc2); }; int 
main(int argc, char *argv[]) { struct int_container icon; struct charp_and_int_container cicon; tcon_check(&icon, tc, 7)->raw.p = NULL; tcon_check(&cicon, tc1, 7)->raw.p = argv[0]; tcon_check(&cicon, tc2, argv[0])->raw.p = argv[0]; return 0; } ntdb-1.0/lib/ccan/time/000077500000000000000000000000001224151530700147115ustar00rootroot00000000000000ntdb-1.0/lib/ccan/time/LICENSE000066400000000000000000000017771224151530700157320ustar00rootroot00000000000000Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ntdb-1.0/lib/ccan/time/_info000066400000000000000000000016351224151530700157330ustar00rootroot00000000000000#include #include "config.h" /** * time - routines for dealing with time * * This code provides convenient functions for working with time. 
* * Author: Rusty Russell * License: BSD-MIT * * Example: * #include * #include * #include * #include * * int main(int argc, char *argv[]) * { * struct timeval t; * * if (argc != 2) * errx(1, "Usage: %s ", argv[0]); * * t = time_now(); * if (argv[1][0] == '-') * t = time_sub(t, time_from_msec(atol(argv[1]+1))); * else * t = time_add(t, time_from_msec(atol(argv[1]))); * * printf("%lu.%06u\n", * (unsigned long)t.tv_sec, (unsigned)t.tv_usec); * return 0; * } */ int main(int argc, char *argv[]) { /* Expect exactly one argument */ if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { return 0; } return 1; } ntdb-1.0/lib/ccan/time/test/000077500000000000000000000000001224151530700156705ustar00rootroot00000000000000ntdb-1.0/lib/ccan/time/test/run.c000066400000000000000000000045071224151530700166460ustar00rootroot00000000000000#include #include #include int main(void) { struct timeval t1, t2, t3, zero = { 0, 0 }; plan_tests(46); /* Test time_now */ t1 = time_now(); t2 = time_now(); /* Test time_sub. */ t3 = time_sub(t2, t1); ok1(t3.tv_sec > 0 || t3.tv_usec >= 0); t3 = time_sub(t2, t2); ok1(t3.tv_sec == 0 && t3.tv_usec == 0); t3 = time_sub(t1, t1); ok1(t3.tv_sec == 0 && t3.tv_usec == 0); /* Test time_eq */ ok1(time_eq(t1, t1)); ok1(time_eq(t2, t2)); ok1(!time_eq(t1, t3)); ok1(!time_eq(t2, t3)); /* Make sure t2 > t1. */ t3.tv_sec = 0; t3.tv_usec = 1; t2 = time_add(t2, t3); /* Test time_less and time_greater. 
*/ ok1(!time_eq(t1, t2)); ok1(!time_greater(t1, t2)); ok1(time_less(t1, t2)); ok1(time_greater(t2, t1)); ok1(!time_less(t2, t1)); t3.tv_sec = 0; t3.tv_usec = 999999; t2 = time_add(t2, t3); ok1(!time_eq(t1, t2)); ok1(!time_greater(t1, t2)); ok1(time_less(t1, t2)); ok1(time_greater(t2, t1)); ok1(!time_less(t2, t1)); t3 = time_sub(t2, zero); ok1(time_eq(t3, t2)); t3 = time_sub(t2, t2); ok1(time_eq(t3, zero)); /* time_from_msec / time_to_msec */ t3 = time_from_msec(500); ok1(t3.tv_sec == 0); ok1(t3.tv_usec == 500000); ok1(time_to_msec(t3) == 500); t3 = time_from_msec(1000); ok1(t3.tv_sec == 1); ok1(t3.tv_usec == 0); ok1(time_to_msec(t3) == 1000); t3 = time_from_msec(1500); ok1(t3.tv_sec == 1); ok1(t3.tv_usec == 500000); ok1(time_to_msec(t3) == 1500); /* time_from_usec */ t3 = time_from_usec(500000); ok1(t3.tv_sec == 0); ok1(t3.tv_usec == 500000); ok1(time_to_usec(t3) == 500000); t3 = time_from_usec(1000000); ok1(t3.tv_sec == 1); ok1(t3.tv_usec == 0); ok1(time_to_usec(t3) == 1000000); t3 = time_from_usec(1500000); ok1(t3.tv_sec == 1); ok1(t3.tv_usec == 500000); ok1(time_to_usec(t3) == 1500000); /* Test wrapunder */ t3 = time_sub(time_sub(t2, time_from_msec(500)), time_from_msec(500)); ok1(t3.tv_sec == t2.tv_sec - 1); ok1(t3.tv_usec == t2.tv_usec); /* time_divide and time_multiply */ t1.tv_usec = 100; t1.tv_sec = 100; t3 = time_divide(t1, 2); ok1(t3.tv_sec == 50); ok1(t3.tv_usec == 50); t3 = time_divide(t1, 100); ok1(t3.tv_sec == 1); ok1(t3.tv_usec == 1); t3 = time_multiply(t3, 100); ok1(time_eq(t3, t1)); t3 = time_divide(t1, 200); ok1(t3.tv_sec == 0); ok1(t3.tv_usec == 500000); return exit_status(); } ntdb-1.0/lib/ccan/time/time.c000066400000000000000000000040101224151530700160060ustar00rootroot00000000000000/* Licensed under BSD-MIT - see LICENSE file for details */ #include #include #include struct timeval time_now(void) { struct timeval now; gettimeofday(&now, NULL); return now; } bool time_greater(struct timeval a, struct timeval b) { if (a.tv_sec > b.tv_sec) return 
true; else if (a.tv_sec < b.tv_sec) return false; return a.tv_usec > b.tv_usec; } bool time_less(struct timeval a, struct timeval b) { if (a.tv_sec < b.tv_sec) return true; else if (a.tv_sec > b.tv_sec) return false; return a.tv_usec < b.tv_usec; } bool time_eq(struct timeval a, struct timeval b) { return a.tv_sec == b.tv_sec && a.tv_usec == b.tv_usec; } struct timeval time_sub(struct timeval recent, struct timeval old) { struct timeval diff; diff.tv_sec = recent.tv_sec - old.tv_sec; if (old.tv_usec > recent.tv_usec) { diff.tv_sec--; diff.tv_usec = 1000000 + recent.tv_usec - old.tv_usec; } else diff.tv_usec = recent.tv_usec - old.tv_usec; assert(diff.tv_sec >= 0); return diff; } struct timeval time_add(struct timeval a, struct timeval b) { struct timeval sum; sum.tv_sec = a.tv_sec + b.tv_sec; sum.tv_usec = a.tv_usec + b.tv_usec; if (sum.tv_usec > 1000000) { sum.tv_sec++; sum.tv_usec -= 1000000; } return sum; } struct timeval time_divide(struct timeval t, unsigned long div) { return time_from_usec(time_to_usec(t) / div); } struct timeval time_multiply(struct timeval t, unsigned long mult) { return time_from_usec(time_to_usec(t) * mult); } uint64_t time_to_msec(struct timeval t) { uint64_t msec; msec = t.tv_usec / 1000 + (uint64_t)t.tv_sec * 1000; return msec; } uint64_t time_to_usec(struct timeval t) { uint64_t usec; usec = t.tv_usec + (uint64_t)t.tv_sec * 1000000; return usec; } struct timeval time_from_msec(uint64_t msec) { struct timeval t; t.tv_usec = (msec % 1000) * 1000; t.tv_sec = msec / 1000; return t; } struct timeval time_from_usec(uint64_t usec) { struct timeval t; t.tv_usec = usec % 1000000; t.tv_sec = usec / 1000000; return t; } ntdb-1.0/lib/ccan/time/time.d000066400000000000000000000024421224151530700160160ustar00rootroot00000000000000ccan/time/time.o: ccan/time/time.c ccan/time/time.h config.h \ /usr/include/i386-linux-gnu/sys/time.h /usr/include/features.h \ /usr/include/i386-linux-gnu/bits/predefs.h \ /usr/include/i386-linux-gnu/sys/cdefs.h \ 
/usr/include/i386-linux-gnu/bits/wordsize.h \ /usr/include/i386-linux-gnu/gnu/stubs.h \ /usr/include/i386-linux-gnu/gnu/stubs-32.h \ /usr/include/i386-linux-gnu/bits/types.h \ /usr/include/i386-linux-gnu/bits/typesizes.h /usr/include/time.h \ /usr/include/i386-linux-gnu/bits/time.h \ /usr/include/i386-linux-gnu/sys/select.h \ /usr/include/i386-linux-gnu/bits/select.h \ /usr/include/i386-linux-gnu/bits/sigset.h \ /usr/lib/gcc/i686-linux-gnu/4.5.4/include/stdint.h /usr/include/stdint.h \ /usr/include/i386-linux-gnu/bits/wchar.h \ /usr/lib/gcc/i686-linux-gnu/4.5.4/include/stdbool.h \ /usr/include/stdlib.h /usr/lib/gcc/i686-linux-gnu/4.5.4/include/stddef.h \ /usr/include/i386-linux-gnu/bits/waitflags.h \ /usr/include/i386-linux-gnu/bits/waitstatus.h /usr/include/endian.h \ /usr/include/i386-linux-gnu/bits/endian.h \ /usr/include/i386-linux-gnu/bits/byteswap.h /usr/include/xlocale.h \ /usr/include/i386-linux-gnu/sys/types.h \ /usr/include/i386-linux-gnu/sys/sysmacros.h \ /usr/include/i386-linux-gnu/bits/pthreadtypes.h /usr/include/alloca.h \ /usr/include/assert.h ntdb-1.0/lib/ccan/time/time.h000066400000000000000000000101601224151530700160160ustar00rootroot00000000000000/* Licensed under BSD-MIT - see LICENSE file for details */ #ifndef CCAN_TIME_H #define CCAN_TIME_H #include "config.h" #include #include #include /** * time_now - return the current time * * Example: * printf("Now is %lu seconds since epoch\n", (long)time_now().tv_sec); */ struct timeval time_now(void); /** * time_greater - is a after b? * @a: one time. * @b: another time. * * Example: * static bool timed_out(const struct timeval *start) * { * #define TIMEOUT time_from_msec(1000) * return time_greater(time_now(), time_add(*start, TIMEOUT)); * } */ bool time_greater(struct timeval a, struct timeval b); /** * time_less - is a before b? * @a: one time. * @b: another time. 
* * Example: * static bool still_valid(const struct timeval *start) * { * #define TIMEOUT time_from_msec(1000) * return time_less(time_now(), time_add(*start, TIMEOUT)); * } */ bool time_less(struct timeval a, struct timeval b); /** * time_eq - is a equal to b? * @a: one time. * @b: another time. * * Example: * #include * #include * * // Can we fork in under a microsecond? * static bool fast_fork(void) * { * struct timeval start = time_now(); * if (fork() != 0) { * exit(0); * } * wait(NULL); * return time_eq(start, time_now()); * } */ bool time_eq(struct timeval a, struct timeval b); /** * time_sub - subtract two times * @recent: the larger (more recent) time. * @old: the smaller (less recent) time. * * This returns a well formed struct timeval. * * Example: * static bool was_recent(const struct timeval *start) * { * return time_sub(time_now(), *start).tv_sec < 1; * } */ struct timeval time_sub(struct timeval recent, struct timeval old); /** * time_add - add two times * @a: one time. * @b: another time. * * The times must not overflow, or the results are undefined. * * Example: * // We do one every second. * static struct timeval next_time(void) * { * return time_add(time_now(), time_from_msec(1000)); * } */ struct timeval time_add(struct timeval a, struct timeval b); /** * time_divide - divide a time by a value. * @t: a time. * @div: number to divide it by. * * Example: * // How long does it take to do a fork? * static struct timeval forking_time(void) * { * struct timeval start = time_now(); * unsigned int i; * * for (i = 0; i < 1000; i++) { * if (fork() != 0) { * exit(0); * } * wait(NULL); * } * return time_divide(time_sub(time_now(), start), i); * } */ struct timeval time_divide(struct timeval t, unsigned long div); /** * time_multiply - multiply a time by a value. * @t: a time. * @mult: number to multiply it by. * * Example: * ... 
* printf("Time to do 100000 forks would be %u sec\n", * (unsigned)time_multiply(forking_time(), 1000000).tv_sec); */ struct timeval time_multiply(struct timeval t, unsigned long mult); /** * time_to_msec - return number of milliseconds * @t: a time * * It's often more convenient to deal with time values as * milliseconds. Note that this will fit into a 32-bit variable if * it's a time difference of less than ~7 weeks. * * Example: * ... * printf("Forking time is %u msec\n", * (unsigned)time_to_msec(forking_time())); */ uint64_t time_to_msec(struct timeval t); /** * time_to_usec - return number of microseconds * @t: a time * * It's often more convenient to deal with time values as * microseconds. Note that this will fit into a 32-bit variable if * it's a time difference of less than ~1 hour. * * Example: * ... * printf("Forking time is %u usec\n", * (unsigned)time_to_usec(forking_time())); * */ uint64_t time_to_usec(struct timeval t); /** * time_from_msec - convert milliseconds to a timeval * @msec: time in milliseconds * * Example: * // 1/2 second timeout * #define TIMEOUT time_from_msec(500) */ struct timeval time_from_msec(uint64_t msec); /** * time_from_usec - convert microseconds to a timeval * @usec: time in microseconds * * Example: * // 1/2 second timeout * #define TIMEOUT time_from_usec(500000) */ struct timeval time_from_usec(uint64_t usec); #endif /* CCAN_TIME_H */ ntdb-1.0/lib/ccan/tlist/000077500000000000000000000000001224151530700151125ustar00rootroot00000000000000ntdb-1.0/lib/ccan/tlist/LICENSE000066400000000000000000000167251224151530700161320ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 
This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. 
If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. 
c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. 
You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. 
ntdb-1.0/lib/ccan/tlist/_info000066400000000000000000000030441224151530700161300ustar00rootroot00000000000000#include #include #include "config.h" /** * tlist - typesafe double linked list routines * * The list header contains routines for manipulating double linked lists; * this extends it so you can create list head types which only accomodate * a specific entry type. * * Example: * #include * #include * #include * #include * * // We could use TLIST_TYPE(children, struct child) to define this. * struct tlist_children { * struct list_head raw; * TCON(struct child *canary); * }; * struct parent { * const char *name; * struct tlist_children children; * unsigned int num_children; * }; * * struct child { * const char *name; * struct list_node list; * }; * * int main(int argc, char *argv[]) * { * struct parent p; * struct child *c; * unsigned int i; * * if (argc < 2) * errx(1, "Usage: %s parent children...", argv[0]); * * p.name = argv[1]; * tlist_init(&p.children); * for (i = 2; i < argc; i++) { * c = malloc(sizeof(*c)); * c->name = argv[i]; * tlist_add(&p.children, c, list); * p.num_children++; * } * * printf("%s has %u children:", p.name, p.num_children); * tlist_for_each(&p.children, c, list) * printf("%s ", c->name); * printf("\n"); * return 0; * } * * License: LGPL * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/list\n"); printf("ccan/tcon\n"); return 0; } return 1; } ntdb-1.0/lib/ccan/tlist/test/000077500000000000000000000000001224151530700160715ustar00rootroot00000000000000ntdb-1.0/lib/ccan/tlist/test/compile_fail-tlist_add.c000066400000000000000000000013351224151530700226270ustar00rootroot00000000000000#include TLIST_TYPE(children, struct child); TLIST_TYPE(cousins, struct cousin); struct child { const char *name; struct ccan_list_node list; }; struct cousin { const char *name; struct ccan_list_node list; }; int main(int argc, char *argv[]) { struct tlist_children 
children; struct tlist_cousins cousins; struct child child = { "child" }; struct cousin cousin = { "cousin" }; tlist_init(&children); tlist_init(&cousins); tlist_add(&children, &child, list); tlist_add(&cousins, &cousin, list); tlist_del_from(&cousins, &cousin, list); #ifdef FAIL #if !HAVE_FLEXIBLE_ARRAY_MEMBER #error Need flexible array members to check type #endif tlist_add(&children, &cousin, list); #endif return 0; } ntdb-1.0/lib/ccan/tlist/test/compile_fail-tlist_add_tail.c000066400000000000000000000013421224151530700236360ustar00rootroot00000000000000#include TLIST_TYPE(children, struct child); TLIST_TYPE(cousins, struct cousin); struct child { const char *name; struct ccan_list_node list; }; struct cousin { const char *name; struct ccan_list_node list; }; int main(int argc, char *argv[]) { struct tlist_children children; struct tlist_cousins cousins; struct child child = { "child" }; struct cousin cousin = { "cousin" }; tlist_init(&children); tlist_init(&cousins); tlist_add(&children, &child, list); tlist_add(&cousins, &cousin, list); tlist_del_from(&cousins, &cousin, list); #ifdef FAIL #if !HAVE_FLEXIBLE_ARRAY_MEMBER #error Need flexible array members to check type #endif tlist_add_tail(&children, &cousin, list); #endif return 0; } ntdb-1.0/lib/ccan/tlist/test/compile_fail-tlist_del_from.c000066400000000000000000000012701224151530700236640ustar00rootroot00000000000000#include TLIST_TYPE(children, struct child); TLIST_TYPE(cousins, struct cousin); struct child { const char *name; struct ccan_list_node list; }; struct cousin { const char *name; struct ccan_list_node list; }; int main(int argc, char *argv[]) { struct tlist_children children; struct tlist_cousins cousins; struct child child = { "child" }; struct cousin cousin = { "cousin" }; tlist_init(&children); tlist_init(&cousins); tlist_add(&children, &child, list); tlist_add(&cousins, &cousin, list); #ifdef FAIL #if !HAVE_FLEXIBLE_ARRAY_MEMBER #error Need flexible array members to check type #endif 
tlist_del_from(&children, &cousin, list); #endif return 0; } ntdb-1.0/lib/ccan/tlist/test/compile_fail-tlist_for_each.c000066400000000000000000000011641224151530700236450ustar00rootroot00000000000000#include TLIST_TYPE(children, struct child); struct child { const char *name; struct ccan_list_node list; }; struct cousin { const char *name; struct ccan_list_node list; }; int main(int argc, char *argv[]) { struct tlist_children children; struct child child = { "child" }; #ifdef FAIL #if !HAVE_FLEXIBLE_ARRAY_MEMBER #error Need flexible array members to check type #endif struct cousin *c; #else struct child *c; #endif tlist_init(&children); tlist_add(&children, &child, list); tlist_for_each(&children, c, list) (void) c; /* Suppress unused-but-set-variable warning. */ return 0; } ntdb-1.0/lib/ccan/tlist/test/compile_fail-tlist_for_each_safe.c000066400000000000000000000011111224151530700246330ustar00rootroot00000000000000#include TLIST_TYPE(children, struct child); struct child { const char *name; struct ccan_list_node list; }; struct cousin { const char *name; struct ccan_list_node list; }; int main(int argc, char *argv[]) { struct tlist_children children; struct child child = { "child" }; #ifdef FAIL #if !HAVE_FLEXIBLE_ARRAY_MEMBER #error Need flexible array members to check type #endif struct cousin *c, *n; #else struct child *c, *n; #endif tlist_init(&children); tlist_add(&children, &child, list); tlist_for_each_safe(&children, c, n, list); return 0; } ntdb-1.0/lib/ccan/tlist/test/compile_fail-tlist_tail.c000066400000000000000000000010311224151530700230210ustar00rootroot00000000000000#include TLIST_TYPE(children, struct child); struct child { const char *name; struct ccan_list_node list; }; struct cousin { const char *name; struct ccan_list_node list; }; int main(int argc, char *argv[]) { struct tlist_children children; struct child child = { "child" }; #ifdef FAIL struct cousin *c; #else struct child *c; #endif tlist_init(&children); tlist_add(&children, &child, 
list); c = tlist_tail(&children, list); (void) c; /* Suppress unused-but-set-variable warning. */ return 0; } ntdb-1.0/lib/ccan/tlist/test/compile_fail-tlist_top.c000066400000000000000000000010301224151530700226710ustar00rootroot00000000000000#include TLIST_TYPE(children, struct child); struct child { const char *name; struct ccan_list_node list; }; struct cousin { const char *name; struct ccan_list_node list; }; int main(int argc, char *argv[]) { struct tlist_children children; struct child child = { "child" }; #ifdef FAIL struct cousin *c; #else struct child *c; #endif tlist_init(&children); tlist_add(&children, &child, list); c = tlist_top(&children, list); (void) c; /* Suppress unused-but-set-variable warning. */ return 0; } ntdb-1.0/lib/ccan/tlist/test/run.c000066400000000000000000000066361224151530700170540ustar00rootroot00000000000000#define CCAN_LIST_DEBUG 1 #include #include TLIST_TYPE(children, struct child); struct parent { const char *name; struct tlist_children children; unsigned int num_children; }; struct child { const char *name; struct ccan_list_node list; }; int main(int argc, char *argv[]) { struct parent parent; struct child c1, c2, c3, *c, *n; unsigned int i; struct tlist_children tlist = TLIST_INIT(tlist); plan_tests(48); /* Test TLIST_INIT, and tlist_empty */ ok1(tlist_empty(&tlist)); ok1(tlist_check(&tlist, NULL)); parent.num_children = 0; tlist_init(&parent.children); /* Test tlist_init */ ok1(tlist_empty(&parent.children)); ok1(tlist_check(&parent.children, NULL)); c2.name = "c2"; tlist_add(&parent.children, &c2, list); /* Test tlist_add and !tlist_empty. */ ok1(!tlist_empty(&parent.children)); ok1(c2.list.next == &parent.children.raw.n); ok1(c2.list.prev == &parent.children.raw.n); ok1(parent.children.raw.n.next == &c2.list); ok1(parent.children.raw.n.prev == &c2.list); /* Test tlist_check */ ok1(tlist_check(&parent.children, NULL)); c1.name = "c1"; tlist_add(&parent.children, &c1, list); /* Test ccan_list_add and !ccan_list_empty. 
*/ ok1(!tlist_empty(&parent.children)); ok1(c2.list.next == &parent.children.raw.n); ok1(c2.list.prev == &c1.list); ok1(parent.children.raw.n.next == &c1.list); ok1(parent.children.raw.n.prev == &c2.list); ok1(c1.list.next == &c2.list); ok1(c1.list.prev == &parent.children.raw.n); /* Test tlist_check */ ok1(tlist_check(&parent.children, NULL)); c3.name = "c3"; tlist_add_tail(&parent.children, &c3, list); /* Test ccan_list_add_tail and !ccan_list_empty. */ ok1(!tlist_empty(&parent.children)); ok1(parent.children.raw.n.next == &c1.list); ok1(parent.children.raw.n.prev == &c3.list); ok1(c1.list.next == &c2.list); ok1(c1.list.prev == &parent.children.raw.n); ok1(c2.list.next == &c3.list); ok1(c2.list.prev == &c1.list); ok1(c3.list.next == &parent.children.raw.n); ok1(c3.list.prev == &c2.list); /* Test tlist_check */ ok1(tlist_check(&parent.children, NULL)); /* Test tlist_top */ ok1(tlist_top(&parent.children, list) == &c1); /* Test ccan_list_tail */ ok1(tlist_tail(&parent.children, list) == &c3); /* Test tlist_for_each. */ i = 0; tlist_for_each(&parent.children, c, list) { switch (i++) { case 0: ok1(c == &c1); break; case 1: ok1(c == &c2); break; case 2: ok1(c == &c3); break; } if (i > 2) break; } ok1(i == 3); /* Test tlist_for_each_rev. */ i = 0; tlist_for_each_rev(&parent.children, c, list) { switch (i++) { case 0: ok1(c == &c3); break; case 1: ok1(c == &c2); break; case 2: ok1(c == &c1); break; } if (i > 2) break; } ok1(i == 3); /* Test tlist_for_each_safe, tlist_del and tlist_del_from. */ i = 0; tlist_for_each_safe(&parent.children, c, n, list) { switch (i++) { case 0: ok1(c == &c1); tlist_del(c, list); break; case 1: ok1(c == &c2); tlist_del_from(&parent.children, c, list); break; case 2: ok1(c == &c3); tlist_del_from(&parent.children, c, list); break; } ok1(tlist_check(&parent.children, NULL)); if (i > 2) break; } ok1(i == 3); ok1(tlist_empty(&parent.children)); /* Test ccan_list_top/ccan_list_tail on empty list. 
*/ ok1(tlist_top(&parent.children, list) == (struct child *)NULL); ok1(tlist_tail(&parent.children, list) == (struct child *)NULL); return exit_status(); } ntdb-1.0/lib/ccan/tlist/tlist.h000066400000000000000000000167771224151530700164440ustar00rootroot00000000000000/* Licensed under LGPL - see LICENSE file for details */ #ifndef CCAN_TLIST_H #define CCAN_TLIST_H #include #include /** * TLIST_TYPE - declare a typed list type (struct tlist) * @suffix: the name to use (struct tlist_@suffix) * @type: the type the list will contain (void for any type) * * This declares a structure "struct tlist_@suffix" to use for * lists containing this type. The actual list can be accessed using * ".raw" or tlist_raw(). * * Example: * // Defines struct tlist_children * TLIST_TYPE(children, struct child); * struct parent { * const char *name; * struct tlist_children children; * unsigned int num_children; * }; * * struct child { * const char *name; * struct ccan_list_node list; * }; */ #define TLIST_TYPE(suffix, type) \ struct tlist_##suffix { \ struct ccan_list_head raw; \ TCON(type *canary); \ } /** * TLIST_INIT - initalizer for an empty tlist * @name: the name of the list. * * Explicit initializer for an empty list. * * See also: * tlist_init() * * Example: * static struct tlist_children my_list = TLIST_INIT(my_list); */ #define TLIST_INIT(name) { CCAN_LIST_HEAD_INIT(name.raw) } /** * tlist_check - check head of a list for consistency * @h: the tlist_head * @abortstr: the location to print on aborting, or NULL. * * Because list_nodes have redundant information, consistency checking between * the back and forward links can be done. This is useful as a debugging check. * If @abortstr is non-NULL, that will be printed in a diagnostic if the list * is inconsistent, and the function will abort. * * Returns non-NULL if the list is consistent, NULL otherwise (it * can never return NULL if @abortstr is set). 
* * See also: ccan_list_check() * * Example: * static void dump_parent(struct parent *p) * { * struct child *c; * * printf("%s (%u children):\n", p->name, p->num_children); * tlist_check(&p->children, "bad child list"); * tlist_for_each(&p->children, c, list) * printf(" -> %s\n", c->name); * } */ #define tlist_check(h, abortstr) \ ccan_list_check(&(h)->raw, (abortstr)) /** * tlist_init - initialize a tlist * @h: the tlist to set to the empty list * * Example: * ... * struct parent *parent = malloc(sizeof(*parent)); * * tlist_init(&parent->children); * parent->num_children = 0; */ #define tlist_init(h) ccan_list_head_init(&(h)->raw) /** * tlist_raw - unwrap the typed list and check the type * @h: the tlist * @expr: the expression to check the type against (not evaluated) * * This macro usually causes the compiler to emit a warning if the * variable is of an unexpected type. It is used internally where we * need to access the raw underlying list. */ #define tlist_raw(h, expr) (&tcon_check((h), canary, (expr))->raw) /** * tlist_add - add an entry at the start of a linked list. * @h: the tlist to add the node to * @n: the entry to add to the list. * @member: the member of n to add to the list. * * The entry's ccan_list_node does not need to be initialized; it will be * overwritten. * Example: * struct child *child = malloc(sizeof(*child)); * * child->name = "marvin"; * tlist_add(&parent->children, child, list); * parent->num_children++; */ #define tlist_add(h, n, member) ccan_list_add(tlist_raw((h), (n)), &(n)->member) /** * tlist_add_tail - add an entry at the end of a linked list. * @h: the tlist to add the node to * @n: the entry to add to the list. * @member: the member of n to add to the list. * * The ccan_list_node does not need to be initialized; it will be overwritten. 
* Example: * tlist_add_tail(&parent->children, child, list); * parent->num_children++; */ #define tlist_add_tail(h, n, member) \ ccan_list_add_tail(tlist_raw((h), (n)), &(n)->member) /** * tlist_del_from - delete an entry from a linked list. * @h: the tlist @n is in * @n: the entry to delete * @member: the member of n to remove from the list. * * This explicitly indicates which list a node is expected to be in, * which is better documentation and can catch more bugs. * * Note that this leaves @n->@member in an undefined state; it * can be added to another list, but not deleted again. * * See also: tlist_del() * * Example: * tlist_del_from(&parent->children, child, list); * parent->num_children--; */ #define tlist_del_from(h, n, member) \ ccan_list_del_from(tlist_raw((h), (n)), &(n)->member) /** * tlist_del - delete an entry from an unknown linked list. * @n: the entry to delete from the list. * @member: the member of @n which is in the list. * * Example: * tlist_del(child, list); * parent->num_children--; */ #define tlist_del(n, member) \ ccan_list_del(&(n)->member) /** * tlist_empty - is a list empty? * @h: the tlist * * If the list is empty, returns true. * * Example: * assert(tlist_empty(&parent->children) == (parent->num_children == 0)); */ #define tlist_empty(h) ccan_list_empty(&(h)->raw) /** * tlist_top - get the first entry in a list * @h: the tlist * @member: the ccan_list_node member of the type * * If the list is empty, returns NULL. * * Example: * struct child *first; * first = tlist_top(&parent->children, list); */ #define tlist_top(h, member) \ ((tcon_type((h), canary)) \ ccan_list_top_(&(h)->raw, \ (char *)(&(h)->_tcon[0].canary->member) - \ (char *)((h)->_tcon[0].canary))) /** * tlist_tail - get the last entry in a list * @h: the tlist * @member: the ccan_list_node member of the type * * If the list is empty, returns NULL. 
* * Example: * struct child *last; * last = tlist_tail(&parent->children, list); */ #define tlist_tail(h, member) \ ((tcon_type((h), canary)) \ ccan_list_tail_(&(h)->raw, \ (char *)(&(h)->_tcon[0].canary->member) - \ (char *)((h)->_tcon[0].canary))) /** * tlist_for_each - iterate through a list. * @h: the tlist * @i: an iterator of suitable type for this list. * @member: the ccan_list_node member of @i * * This is a convenient wrapper to iterate @i over the entire list. It's * a for loop, so you can break and continue as normal. * * Example: * tlist_for_each(&parent->children, child, list) * printf("Name: %s\n", child->name); */ #define tlist_for_each(h, i, member) \ ccan_list_for_each(tlist_raw((h), (i)), (i), member) /** * tlist_for_each - iterate through a list backwards. * @h: the tlist * @i: an iterator of suitable type for this list. * @member: the ccan_list_node member of @i * * This is a convenient wrapper to iterate @i over the entire list. It's * a for loop, so you can break and continue as normal. * * Example: * tlist_for_each_rev(&parent->children, child, list) * printf("Name: %s\n", child->name); */ #define tlist_for_each_rev(h, i, member) \ ccan_list_for_each_rev(tlist_raw((h), (i)), (i), member) /** * tlist_for_each_safe - iterate through a list, maybe during deletion * @h: the tlist * @i: an iterator of suitable type for this list. * @nxt: another iterator to store the next entry. * @member: the ccan_list_node member of the structure * * This is a convenient wrapper to iterate @i over the entire list. It's * a for loop, so you can break and continue as normal. The extra variable * @nxt is used to hold the next element, so you can delete @i from the list. 
* * Example: * struct child *next; * tlist_for_each_safe(&parent->children, child, next, list) { * tlist_del(child, list); * parent->num_children--; * } */ #define tlist_for_each_safe(h, i, nxt, member) \ ccan_list_for_each_safe(tlist_raw((h), (i)), (i), (nxt), member) #endif /* CCAN_TLIST_H */ ntdb-1.0/lib/ccan/typesafe_cb/000077500000000000000000000000001224151530700162375ustar00rootroot00000000000000ntdb-1.0/lib/ccan/typesafe_cb/LICENSE000066400000000000000000000636351224151530700172610ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. 
Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. 
For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. 
For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. 
d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. 
However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. 
If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. 
For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. 
You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. 
It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. 
If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. 
You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! ntdb-1.0/lib/ccan/typesafe_cb/_info000066400000000000000000000105021224151530700172520ustar00rootroot00000000000000#include #include #include "config.h" /** * typesafe_cb - macros for safe callbacks. * * The basis of the typesafe_cb header is typesafe_cb_cast(): a * conditional cast macro. 
If an expression exactly matches a given * type, it is cast to the target type, otherwise it is left alone. * * This allows us to create functions which take a small number of * specific types, rather than being forced to use a void *. In * particular, it is useful for creating typesafe callbacks as the * helpers typesafe_cb(), typesafe_cb_preargs() and * typesafe_cb_postargs() demonstrate. * * The standard way of passing arguments to callback functions in C is * to use a void pointer, which the callback then casts back to the * expected type. This unfortunately subverts the type checking the * compiler would perform if it were a direct call. Here's an example: * * static void my_callback(void *_obj) * { * struct obj *obj = _obj; * ... * } * ... * register_callback(my_callback, &my_obj); * * If we wanted to use the natural type for my_callback (ie. "void * my_callback(struct obj *obj)"), we could make register_callback() * take a void * as its first argument, but this would subvert all * type checking. We really want register_callback() to accept only * the exactly correct function type to match the argument, or a * function which takes a void *. * * This is where typesafe_cb() comes in: it uses typesafe_cb_cast() to * cast the callback function if it matches the argument type: * * void _register_callback(void (*cb)(void *arg), void *arg); * #define register_callback(cb, arg) \ * _register_callback(typesafe_cb(void, void *, (cb), (arg)), \ * (arg)) * * On compilers which don't support the extensions required * typesafe_cb_cast() and friend become an unconditional cast, so your * code will compile but you won't get type checking. * * Example: * #include * #include * #include * * // Generic callback infrastructure. 
* struct callback { * struct callback *next; * int value; * int (*callback)(int value, void *arg); * void *arg; * }; * static struct callback *callbacks; * * static void _register_callback(int value, int (*cb)(int, void *), * void *arg) * { * struct callback *new = malloc(sizeof(*new)); * new->next = callbacks; * new->value = value; * new->callback = cb; * new->arg = arg; * callbacks = new; * } * #define register_callback(value, cb, arg) \ * _register_callback(value, \ * typesafe_cb_preargs(int, void *, \ * (cb), (arg), int),\ * (arg)) * * static struct callback *find_callback(int value) * { * struct callback *i; * * for (i = callbacks; i; i = i->next) * if (i->value == value) * return i; * return NULL; * } * * // Define several silly callbacks. Note they don't use void *! * #define DEF_CALLBACK(name, op) \ * static int name(int val, int *arg) \ * { \ * printf("%s", #op); \ * return val op *arg; \ * } * DEF_CALLBACK(multiply, *); * DEF_CALLBACK(add, +); * DEF_CALLBACK(divide, /); * DEF_CALLBACK(sub, -); * DEF_CALLBACK(or, |); * DEF_CALLBACK(and, &); * DEF_CALLBACK(xor, ^); * DEF_CALLBACK(assign, =); * * // Silly game to find the longest chain of values. * int main(int argc, char *argv[]) * { * int i, run = 1, num = argv[1] ? atoi(argv[1]) : 0; * * for (i = 1; i < 1024;) { * // Since run is an int, compiler checks "add" does too. 
* register_callback(i++, add, &run); * register_callback(i++, divide, &run); * register_callback(i++, sub, &run); * register_callback(i++, multiply, &run); * register_callback(i++, or, &run); * register_callback(i++, and, &run); * register_callback(i++, xor, &run); * register_callback(i++, assign, &run); * } * * printf("%i ", num); * while (run < 56) { * struct callback *cb = find_callback(num % i); * if (!cb) { * printf("-> STOP\n"); * return 1; * } * num = cb->callback(num, cb->arg); * printf("->%i ", num); * run++; * } * printf("-> Winner!\n"); * return 0; * } * * License: LGPL (v2.1 or any later version) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { return 0; } return 1; } ntdb-1.0/lib/ccan/typesafe_cb/test/000077500000000000000000000000001224151530700172165ustar00rootroot00000000000000ntdb-1.0/lib/ccan/typesafe_cb/test/compile_fail-cast_if_any.c000066400000000000000000000011221224151530700242560ustar00rootroot00000000000000#include #include struct foo { int x; }; struct bar { int x; }; struct baz { int x; }; struct any { int x; }; struct other { int x; }; static void take_any(struct any *any) { } int main(int argc, char *argv[]) { #ifdef FAIL struct other #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if cast_if_type is a noop." 
#endif #else struct foo #endif *arg = NULL; take_any(cast_if_any(struct any *, arg, arg, struct foo *, struct bar *, struct baz *)); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_fail-cast_if_type-promotable.c000066400000000000000000000007321224151530700266200ustar00rootroot00000000000000#include #include static void _set_some_value(void *val) { } #define set_some_value(expr) \ _set_some_value(typesafe_cb_cast(void *, long, (expr))) int main(int argc, char *argv[]) { #ifdef FAIL bool x = 0; #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if typesafe_cb_cast is a noop." #endif #else long x = 0; #endif set_some_value(x); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_fail-cast_if_type.c000066400000000000000000000007751224151530700244650ustar00rootroot00000000000000#include void _set_some_value(void *val); void _set_some_value(void *val) { } #define set_some_value(expr) \ _set_some_value(cast_if_type(void *, (expr), (expr), unsigned long)) int main(int argc, char *argv[]) { #ifdef FAIL int x = 0; set_some_value(x); #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if cast_if_type is a noop." #endif #else void *p = 0; set_some_value(p); #endif return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_fail-typesafe_cb-int.c000066400000000000000000000011151224151530700250550ustar00rootroot00000000000000#include #include void _callback(void (*fn)(void *arg), void *arg); void _callback(void (*fn)(void *arg), void *arg) { fn(arg); } /* Callback is set up to warn if arg isn't a pointer (since it won't * pass cleanly to _callback's second arg. */ #define callback(fn, arg) \ _callback(typesafe_cb(void, (fn), (arg)), (arg)) void my_callback(int something); void my_callback(int something) { } int main(int argc, char *argv[]) { #ifdef FAIL /* This fails due to arg, not due to cast. 
*/ callback(my_callback, 100); #endif return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_fail-typesafe_cb.c000066400000000000000000000013101224151530700242620ustar00rootroot00000000000000#include #include static void _register_callback(void (*cb)(void *arg), void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb(void, void *, (cb), (arg)), (arg)) static void my_callback(char *p) { } int main(int argc, char *argv[]) { char str[] = "hello world"; #ifdef FAIL int *p; #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if typesafe_cb_cast is a noop." #endif #else char *p; #endif p = NULL; /* This should work always. */ register_callback(my_callback, str); /* This will fail with FAIL defined */ register_callback(my_callback, p); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_fail-typesafe_cb_cast-multi.c000066400000000000000000000011351224151530700264310ustar00rootroot00000000000000#include #include struct foo { int x; }; struct bar { int x; }; struct baz { int x; }; struct any { int x; }; struct other { int x; }; static void take_any(struct any *any) { } int main(int argc, char *argv[]) { #ifdef FAIL struct other #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if typesafe_cb_cast is a noop." 
#endif #else struct foo #endif *arg = NULL; take_any(typesafe_cb_cast3(struct any *, struct foo *, struct bar *, struct baz *, arg)); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_fail-typesafe_cb_cast.c000066400000000000000000000007751224151530700253120ustar00rootroot00000000000000#include void _set_some_value(void *val); void _set_some_value(void *val) { } #define set_some_value(expr) \ _set_some_value(typesafe_cb_cast(void *, unsigned long, (expr))) int main(int argc, char *argv[]) { #ifdef FAIL int x = 0; set_some_value(x); #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if typesafe_cb_cast is a noop." #endif #else void *p = 0; set_some_value(p); #endif return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_fail-typesafe_cb_exact.c000066400000000000000000000013201224151530700254470ustar00rootroot00000000000000#include #include static void _register_callback(void (*cb)(void *arg), const void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb_exact(void, (cb), (arg)), (arg)) static void my_callback(const char *p) { } int main(int argc, char *argv[]) { #ifdef FAIL char *p; #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if cast_if_type is a noop." #endif #else const char *p; #endif p = NULL; /* This should work always. 
*/ register_callback(my_callback, (const char *)"hello world"); /* This will fail with FAIL defined */ register_callback(my_callback, p); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_fail-typesafe_cb_postargs.c000066400000000000000000000011261224151530700262110ustar00rootroot00000000000000#include #include static void _register_callback(void (*cb)(void *arg, int x), void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb_postargs(void, void *, (cb), (arg), int), (arg)) static void my_callback(char *p, int x) { } int main(int argc, char *argv[]) { #ifdef FAIL int *p; #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if typesafe_cb_cast is a noop." #endif #else char *p; #endif p = NULL; register_callback(my_callback, p); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_fail-typesafe_cb_preargs.c000066400000000000000000000011261224151530700260120ustar00rootroot00000000000000#include #include static void _register_callback(void (*cb)(int x, void *arg), void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb_preargs(void, void *, (cb), (arg), int), (arg)) static void my_callback(int x, char *p) { } int main(int argc, char *argv[]) { #ifdef FAIL int *p; #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if typesafe_cb_cast is a noop." #endif #else char *p; #endif p = NULL; register_callback(my_callback, p); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_ok-cast_if_any.c000066400000000000000000000013251224151530700237610ustar00rootroot00000000000000#include #include struct foo { int x; }; struct bar { int x; }; struct baz { int x; }; struct any { int x; }; static void take_any(struct any *any) { } int main(int argc, char *argv[]) { #if HAVE_TYPEOF /* Otherwise we get unused warnings for these. 
*/ struct foo *foo = NULL; struct bar *bar = NULL; struct baz *baz = NULL; #endif struct other *arg = NULL; take_any(cast_if_any(struct any *, arg, foo, struct foo *, struct bar *, struct baz *)); take_any(cast_if_any(struct any *, arg, bar, struct foo *, struct bar *, struct baz *)); take_any(cast_if_any(struct any *, arg, baz, struct foo *, struct bar *, struct baz *)); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_ok-typesafe_cb-NULL.c000066400000000000000000000006451224151530700245420ustar00rootroot00000000000000#include #include /* NULL args for callback function should be OK for normal and _def. */ static void _register_callback(void (*cb)(const void *arg), const void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb(void, const void *, (cb), (arg)), (arg)) int main(int argc, char *argv[]) { register_callback(NULL, "hello world"); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_ok-typesafe_cb-const.c000066400000000000000000000022601224151530700251110ustar00rootroot00000000000000#include #include /* const args in callbacks should be OK. 
*/ static void _register_callback(void (*cb)(void *arg), void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb(void, (cb), (arg)), (arg)) #define register_callback_def(cb, arg) \ _register_callback(typesafe_cb_def(void, (cb), (arg)), (arg)) static void _register_callback_pre(void (*cb)(int x, void *arg), void *arg) { } #define register_callback_pre(cb, arg) \ _register_callback_pre(typesafe_cb_preargs(void, (cb), (arg), int), (arg)) static void _register_callback_post(void (*cb)(void *arg, int x), void *arg) { } #define register_callback_post(cb, arg) \ _register_callback_post(typesafe_cb_postargs(void, (cb), (arg), int), (arg)) static void my_callback(const char *p) { } static void my_callback_pre(int x, /*const*/ char *p) { } static void my_callback_post(/*const*/ char *p, int x) { } int main(int argc, char *argv[]) { char p[] = "hello world"; register_callback(my_callback, p); register_callback_def(my_callback, p); register_callback_pre(my_callback_pre, p); register_callback_post(my_callback_post, p); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_ok-typesafe_cb-undefined.c000066400000000000000000000021641224151530700257270ustar00rootroot00000000000000#include #include /* const args in callbacks should be OK. 
*/ static void _register_callback(void (*cb)(void *arg), void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb(void, void *, (cb), (arg)), (arg)) static void _register_callback_pre(void (*cb)(int x, void *arg), void *arg) { } #define register_callback_pre(cb, arg) \ _register_callback_pre(typesafe_cb_preargs(void, void *, (cb), (arg), int), (arg)) static void _register_callback_post(void (*cb)(void *arg, int x), void *arg) { } #define register_callback_post(cb, arg) \ _register_callback_post(typesafe_cb_postargs(void, void *, (cb), (arg), int), (arg)) struct undefined; static void my_callback(struct undefined *undef) { } static void my_callback_pre(int x, struct undefined *undef) { } static void my_callback_post(struct undefined *undef, int x) { } int main(int argc, char *argv[]) { struct undefined *handle = NULL; register_callback(my_callback, handle); register_callback_pre(my_callback_pre, handle); register_callback_post(my_callback_post, handle); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_ok-typesafe_cb-vars.c000066400000000000000000000024111224151530700247340ustar00rootroot00000000000000#include #include /* const args in callbacks should be OK. 
*/ static void _register_callback(void (*cb)(void *arg), void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb(void, void *, (cb), (arg)), (arg)) static void _register_callback_pre(void (*cb)(int x, void *arg), void *arg) { } #define register_callback_pre(cb, arg) \ _register_callback_pre(typesafe_cb_preargs(void, void *, (cb), (arg), int), (arg)) static void _register_callback_post(void (*cb)(void *arg, int x), void *arg) { } #define register_callback_post(cb, arg) \ _register_callback_post(typesafe_cb_postargs(void, void *, (cb), (arg), int), (arg)) struct undefined; static void my_callback(struct undefined *undef) { } static void my_callback_pre(int x, struct undefined *undef) { } static void my_callback_post(struct undefined *undef, int x) { } int main(int argc, char *argv[]) { struct undefined *handle = NULL; void (*cb)(struct undefined *undef) = my_callback; void (*pre)(int x, struct undefined *undef) = my_callback_pre; void (*post)(struct undefined *undef, int x) = my_callback_post; register_callback(cb, handle); register_callback_pre(pre, handle); register_callback_post(post, handle); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_ok-typesafe_cb-volatile.c000066400000000000000000000021311224151530700255770ustar00rootroot00000000000000#include #include /* volatile args in callbacks should be OK. 
*/ static void _register_callback(void (*cb)(void *arg), void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb(void, (cb), (arg)), (arg)) static void _register_callback_pre(void (*cb)(int x, void *arg), void *arg) { } #define register_callback_pre(cb, arg) \ _register_callback_pre(typesafe_cb_preargs(void, (cb), (arg), int), (arg)) static void _register_callback_post(void (*cb)(void *arg, int x), void *arg) { } #define register_callback_post(cb, arg) \ _register_callback_post(typesafe_cb_postargs(void, (cb), (arg), int), (arg)) static void my_callback(volatile char *p) { } /* FIXME: Can't handle volatile for these */ static void my_callback_pre(int x, /* volatile */ char *p) { } static void my_callback_post(/* volatile */ char *p, int x) { } int main(int argc, char *argv[]) { char p[] = "hello world"; register_callback(my_callback, p); register_callback_pre(my_callback_pre, p); register_callback_post(my_callback_post, p); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_ok-typesafe_cb_cast.c000066400000000000000000000012701224151530700247770ustar00rootroot00000000000000#include #include struct foo { int x; }; struct bar { int x; }; struct baz { int x; }; struct any { int x; }; static void take_any(struct any *any) { } int main(int argc, char *argv[]) { /* Otherwise we get unused warnings for these. */ struct foo *foo = NULL; struct bar *bar = NULL; struct baz *baz = NULL; take_any(typesafe_cb_cast3(struct any *, struct foo *, struct bar *, struct baz *, foo)); take_any(typesafe_cb_cast3(struct any *, struct foo *, struct bar *, struct baz *, bar)); take_any(typesafe_cb_cast3(struct any *, struct foo *, struct bar *, struct baz *, baz)); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/compile_ok-typesafe_cb_def-const.c000066400000000000000000000020341224151530700257260ustar00rootroot00000000000000#include #include /* const args in callbacks should be OK. 
*/ static void _register_callback(void (*cb)(void *arg), void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb(void, (cb), (arg)), (arg)) static void _register_callback_pre(void (*cb)(int x, void *arg), void *arg) { } #define register_callback_pre(cb, arg) \ _register_callback_pre(typesafe_cb_preargs(void, (cb), (arg), int), (arg)) static void _register_callback_post(void (*cb)(void *arg, int x), void *arg) { } #define register_callback_post(cb, arg) \ _register_callback_post(typesafe_cb_postargs(void, (cb), (arg), int), (arg)) static void my_callback(const char *p) { } static void my_callback_pre(int x, /*const*/ char *p) { } static void my_callback_post(/*const*/ char *p, int x) { } int main(int argc, char *argv[]) { char p[] = "hello world"; register_callback(my_callback, p); register_callback_pre(my_callback_pre, p); register_callback_post(my_callback_post, p); return 0; } ntdb-1.0/lib/ccan/typesafe_cb/test/run.c000066400000000000000000000045621224151530700201750ustar00rootroot00000000000000#include #include #include #include static char dummy = 0; /* The example usage. 
*/ static void _set_some_value(void *val) { ok1(val == &dummy); } #define set_some_value(expr) \ _set_some_value(typesafe_cb_cast(void *, unsigned long, (expr))) static void _callback_onearg(void (*fn)(void *arg), void *arg) { fn(arg); } static void _callback_preargs(void (*fn)(int a, int b, void *arg), void *arg) { fn(1, 2, arg); } static void _callback_postargs(void (*fn)(void *arg, int a, int b), void *arg) { fn(arg, 1, 2); } #define callback_onearg(cb, arg) \ _callback_onearg(typesafe_cb(void, void *, (cb), (arg)), (arg)) #define callback_preargs(cb, arg) \ _callback_preargs(typesafe_cb_preargs(void, void *, (cb), (arg), int, int), (arg)) #define callback_postargs(cb, arg) \ _callback_postargs(typesafe_cb_postargs(void, void *, (cb), (arg), int, int), (arg)) static void my_callback_onearg(char *p) { ok1(strcmp(p, "hello world") == 0); } static void my_callback_preargs(int a, int b, char *p) { ok1(a == 1); ok1(b == 2); ok1(strcmp(p, "hello world") == 0); } static void my_callback_postargs(char *p, int a, int b) { ok1(a == 1); ok1(b == 2); ok1(strcmp(p, "hello world") == 0); } /* This is simply a compile test; we promised typesafe_cb_cast can be in a * static initializer. 
*/ struct callback_onearg { void (*fn)(void *arg); const void *arg; }; struct callback_onearg cb_onearg = { typesafe_cb(void, void *, my_callback_onearg, (char *)(intptr_t)"hello world"), "hello world" }; struct callback_preargs { void (*fn)(int a, int b, void *arg); const void *arg; }; struct callback_preargs cb_preargs = { typesafe_cb_preargs(void, void *, my_callback_preargs, (char *)(intptr_t)"hi", int, int), "hi" }; struct callback_postargs { void (*fn)(void *arg, int a, int b); const void *arg; }; struct callback_postargs cb_postargs = { typesafe_cb_postargs(void, void *, my_callback_postargs, (char *)(intptr_t)"hi", int, int), "hi" }; int main(int argc, char *argv[]) { void *p = &dummy; unsigned long l = (unsigned long)p; char str[] = "hello world"; plan_tests(2 + 1 + 3 + 3); set_some_value(p); set_some_value(l); callback_onearg(my_callback_onearg, str); callback_preargs(my_callback_preargs, str); callback_postargs(my_callback_postargs, str); return exit_status(); } ntdb-1.0/lib/ccan/typesafe_cb/typesafe_cb.h000066400000000000000000000116401224151530700206760ustar00rootroot00000000000000/* Licensed under LGPLv2.1+ - see LICENSE file for details */ #ifndef CCAN_TYPESAFE_CB_H #define CCAN_TYPESAFE_CB_H #include "config.h" #if HAVE_TYPEOF && HAVE_BUILTIN_CHOOSE_EXPR && HAVE_BUILTIN_TYPES_COMPATIBLE_P /** * typesafe_cb_cast - only cast an expression if it matches a given type * @desttype: the type to cast to * @oktype: the type we allow * @expr: the expression to cast * * This macro is used to create functions which allow multiple types. * The result of this macro is used somewhere that a @desttype type is * expected: if @expr is exactly of type @oktype, then it will be * cast to @desttype type, otherwise left alone. * * This macro can be used in static initializers. * * This is merely useful for warnings: if the compiler does not * support the primitives required for typesafe_cb_cast(), it becomes an * unconditional cast, and the @oktype argument is not used. 
In * particular, this means that @oktype can be a type which uses the * "typeof": it will not be evaluated if typeof is not supported. * * Example: * // We can take either an unsigned long or a void *. * void _set_some_value(void *val); * #define set_some_value(e) \ * _set_some_value(typesafe_cb_cast(void *, (e), unsigned long)) */ #define typesafe_cb_cast(desttype, oktype, expr) \ __builtin_choose_expr( \ __builtin_types_compatible_p(__typeof__(0?(expr):(expr)), \ oktype), \ (desttype)(expr), (expr)) #else #define typesafe_cb_cast(desttype, oktype, expr) ((desttype)(expr)) #endif /** * typesafe_cb_cast3 - only cast an expression if it matches given types * @desttype: the type to cast to * @ok1: the first type we allow * @ok2: the second type we allow * @ok3: the third type we allow * @expr: the expression to cast * * This is a convenient wrapper for multiple typesafe_cb_cast() calls. * You can chain them inside each other (ie. use typesafe_cb_cast() * for expr) if you need more than 3 arguments. * * Example: * // We can take either a long, unsigned long, void * or a const void *. * void _set_some_value(void *val); * #define set_some_value(expr) \ * _set_some_value(typesafe_cb_cast3(void *,, \ * long, unsigned long, const void *,\ * (expr))) */ #define typesafe_cb_cast3(desttype, ok1, ok2, ok3, expr) \ typesafe_cb_cast(desttype, ok1, \ typesafe_cb_cast(desttype, ok2, \ typesafe_cb_cast(desttype, ok3, \ (expr)))) /** * typesafe_cb - cast a callback function if it matches the arg * @rtype: the return type of the callback function * @atype: the (pointer) type which the callback function expects. * @fn: the callback function to cast * @arg: the (pointer) argument to hand to the callback function. * * If a callback function takes a single argument, this macro does * appropriate casts to a function which takes a single atype argument if the * callback provided matches the @arg. 
* * It is assumed that @arg is of pointer type: usually @arg is passed * or assigned to a void * elsewhere anyway. * * Example: * void _register_callback(void (*fn)(void *arg), void *arg); * #define register_callback(fn, arg) \ * _register_callback(typesafe_cb(void, (fn), void*, (arg)), (arg)) */ #define typesafe_cb(rtype, atype, fn, arg) \ typesafe_cb_cast(rtype (*)(atype), \ rtype (*)(__typeof__(arg)), \ (fn)) /** * typesafe_cb_preargs - cast a callback function if it matches the arg * @rtype: the return type of the callback function * @atype: the (pointer) type which the callback function expects. * @fn: the callback function to cast * @arg: the (pointer) argument to hand to the callback function. * * This is a version of typesafe_cb() for callbacks that take other arguments * before the @arg. * * Example: * void _register_callback(void (*fn)(int, void *arg), void *arg); * #define register_callback(fn, arg) \ * _register_callback(typesafe_cb_preargs(void, void *, \ * (fn), (arg), int), \ * (arg)) */ #define typesafe_cb_preargs(rtype, atype, fn, arg, ...) \ typesafe_cb_cast(rtype (*)(__VA_ARGS__, atype), \ rtype (*)(__VA_ARGS__, __typeof__(arg)), \ (fn)) /** * typesafe_cb_postargs - cast a callback function if it matches the arg * @rtype: the return type of the callback function * @atype: the (pointer) type which the callback function expects. * @fn: the callback function to cast * @arg: the (pointer) argument to hand to the callback function. * * This is a version of typesafe_cb() for callbacks that take other arguments * after the @arg. * * Example: * void _register_callback(void (*fn)(void *arg, int), void *arg); * #define register_callback(fn, arg) \ * _register_callback(typesafe_cb_postargs(void, (fn), void *, \ * (arg), int), \ * (arg)) */ #define typesafe_cb_postargs(rtype, atype, fn, arg, ...) 
\ typesafe_cb_cast(rtype (*)(atype, __VA_ARGS__), \ rtype (*)(__typeof__(arg), __VA_ARGS__), \ (fn)) #endif /* CCAN_CAST_IF_TYPE_H */ ntdb-1.0/lib/ccan/wscript000066400000000000000000000215751224151530700154030ustar00rootroot00000000000000#!/usr/bin/env python import Logs, sys, Options def configure(conf): conf.DEFINE('HAVE_CCAN', 1) conf.CHECK_HEADERS('err.h') conf.CHECK_HEADERS('byteswap.h') conf.CHECK_FUNCS('bswap_64', link=False, headers="byteswap.h") conf.CHECK_CODE('int __attribute__((cold)) func(int x) { return x; }', addmain=False, link=False, cflags=conf.env['WERROR_CFLAGS'], define='HAVE_ATTRIBUTE_COLD') conf.CHECK_CODE('int __attribute__((const)) func(int x) { return x; }', addmain=False, link=False, cflags=conf.env['WERROR_CFLAGS'], define='HAVE_ATTRIBUTE_CONST') conf.CHECK_CODE('void __attribute__((noreturn)) func(int x) { exit(x); }', addmain=False, link=False, cflags=conf.env['WERROR_CFLAGS'], define='HAVE_ATTRIBUTE_NORETURN') conf.CHECK_CODE('void __attribute__((format(__printf__, 1, 2))) func(const char *fmt, ...) { }', addmain=False, link=False, cflags=conf.env['WERROR_CFLAGS'], define='HAVE_ATTRIBUTE_PRINTF') conf.CHECK_CODE('int __attribute__((unused)) func(int x) { return x; }', addmain=False, link=False, cflags=conf.env['WERROR_CFLAGS'], define='HAVE_ATTRIBUTE_UNUSED') conf.CHECK_CODE('int __attribute__((used)) func(int x) { return x; }', addmain=False, link=False, cflags=conf.env['WERROR_CFLAGS'], define='HAVE_ATTRIBUTE_USED') # We try to use headers for a compile-time test. conf.CHECK_CODE(code = """#ifdef __BYTE_ORDER #define B __BYTE_ORDER #elif defined(BYTE_ORDER) #define B BYTE_ORDER #endif #ifdef __LITTLE_ENDIAN #define LITTLE __LITTLE_ENDIAN #elif defined(LITTLE_ENDIAN) #define LITTLE LITTLE_ENDIAN #endif #if !defined(LITTLE) || !defined(B) || LITTLE != B #error Not little endian. 
#endif""", headers="endian.h sys/endian.h", define="HAVE_LITTLE_ENDIAN") conf.CHECK_CODE(code = """#ifdef __BYTE_ORDER #define B __BYTE_ORDER #elif defined(BYTE_ORDER) #define B BYTE_ORDER #endif #ifdef __BIG_ENDIAN #define BIG __BIG_ENDIAN #elif defined(BIG_ENDIAN) #define BIG BIG_ENDIAN #endif #if !defined(BIG) || !defined(B) || BIG != B #error Not big endian. #endif""", headers="endian.h sys/endian.h", define="HAVE_BIG_ENDIAN") if not conf.CONFIG_SET("HAVE_BIG_ENDIAN") and not conf.CONFIG_SET("HAVE_LITTLE_ENDIAN"): # That didn't work! Do runtime test. conf.CHECK_CODE("""union { int i; char c[sizeof(int)]; } u; u.i = 0x01020304; return u.c[0] == 0x04 && u.c[1] == 0x03 && u.c[2] == 0x02 && u.c[3] == 0x01 ? 0 : 1;""", addmain=True, execute=True, define='HAVE_LITTLE_ENDIAN', msg="Checking for HAVE_LITTLE_ENDIAN - runtime") conf.CHECK_CODE("""union { int i; char c[sizeof(int)]; } u; u.i = 0x01020304; return u.c[0] == 0x01 && u.c[1] == 0x02 && u.c[2] == 0x03 && u.c[3] == 0x04 ? 0 : 1;""", addmain=True, execute=True, define='HAVE_BIG_ENDIAN', msg="Checking for HAVE_BIG_ENDIAN - runtime") # Extra sanity check. if conf.CONFIG_SET("HAVE_BIG_ENDIAN") == conf.CONFIG_SET("HAVE_LITTLE_ENDIAN"): Logs.error("Failed endian determination. The PDP-11 is back?") sys.exit(1) conf.CHECK_CODE('return __builtin_choose_expr(1, 0, "garbage");', link=True, define='HAVE_BUILTIN_CHOOSE_EXPR') conf.CHECK_CODE('return __builtin_clz(1) == (sizeof(int)*8 - 1) ? 0 : 1;', link=True, define='HAVE_BUILTIN_CLZ') conf.CHECK_CODE('return __builtin_clzl(1) == (sizeof(long)*8 - 1) ? 0 : 1;', link=True, define='HAVE_BUILTIN_CLZL') conf.CHECK_CODE('return __builtin_clzll(1) == (sizeof(long long)*8 - 1) ? 0 : 1;', link=True, define='HAVE_BUILTIN_CLZLL') conf.CHECK_CODE('return __builtin_constant_p(1) ? 0 : 1;', link=True, define='HAVE_BUILTIN_CONSTANT_P') conf.CHECK_CODE('return __builtin_expect(main != 0, 1) ? 
0 : 1;', link=True, define='HAVE_BUILTIN_EXPECT') conf.CHECK_CODE('return __builtin_popcountl(255L) == 8 ? 0 : 1;', link=True, define='HAVE_BUILTIN_POPCOUNTL') conf.CHECK_CODE('return __builtin_types_compatible_p(char *, int) ? 1 : 0;', link=True, define='HAVE_BUILTIN_TYPES_COMPATIBLE_P') conf.CHECK_CODE('int *foo = (int[]) { 1, 2, 3, 4 }; return foo[0] ? 0 : 1;', define='HAVE_COMPOUND_LITERALS') conf.CHECK_CODE('struct foo { unsigned int x; int arr[]; };', addmain=False, link=False, define='HAVE_FLEXIBLE_ARRAY_MEMBER') conf.CHECK_CODE("""#include int main(void) { return isblank(' ') ? 0 : 1; }""", link=True, addmain=False, add_headers=False, define='HAVE_ISBLANK') conf.CHECK_CODE('int x = 1; __typeof__(x) i; i = x; return i == x ? 0 : 1;', link=True, define='HAVE_TYPEOF') conf.CHECK_CODE('int __attribute__((warn_unused_result)) func(int x) { return x; }', addmain=False, link=False, cflags=conf.env['WERROR_CFLAGS'], define='HAVE_WARN_UNUSED_RESULT') # backtrace could be in libexecinfo or in libc conf.CHECK_FUNCS_IN('backtrace backtrace_symbols', 'execinfo', checklibc=True, headers='execinfo.h') # Only check for FILE_OFFSET_BITS=64 if off_t is normally small: # use raw routines because wrappers include previous _GNU_SOURCE # or _FILE_OFFSET_BITS defines. conf.check(fragment="""#include int main(void) { return !(sizeof(off_t) < 8); }""", execute=True, msg='Checking for small off_t', define_name='SMALL_OFF_T') # Unreliable return value above, hence use define. if conf.CONFIG_SET('SMALL_OFF_T'): conf.check(fragment="""#include int main(void) { return !(sizeof(off_t) >= 8); }""", execute=True, msg='Checking for -D_FILE_OFFSET_BITS=64', ccflags='-D_FILE_OFFSET_BITS=64', define_name='HAVE_FILE_OFFSET_BITS') def ccan_module(bld, name, deps=''): bld.SAMBA_SUBSYSTEM('ccan-%s' % name, source=bld.path.ant_glob('%s/*.c' % name), deps=deps) bld.env.CCAN_MODS += 'ccan-%s ' % name def build(bld): bld.env.CCAN_MODS = "" # These have actual C files. 
ccan_module(bld, 'hash', 'ccan-build_assert') ccan_module(bld, 'ilog', 'ccan-compiler'); ccan_module(bld, 'read_write_all') ccan_module(bld, 'str', 'ccan-build_assert') ccan_module(bld, 'tally', 'ccan-build_assert ccan-likely') # These are headers only. ccan_module(bld, 'array_size', 'ccan-build_assert') ccan_module(bld, 'asearch','ccan-typesafe_cb ccan-array_size') ccan_module(bld, 'build_assert') ccan_module(bld, 'cast', 'ccan-build_assert') ccan_module(bld, 'check_type', 'ccan-build_assert') ccan_module(bld, 'compiler') ccan_module(bld, 'endian') ccan_module(bld, 'likely', 'ccan-str') ccan_module(bld, 'typesafe_cb') ccan_module(bld, 'err', 'ccan-compiler') # Failtest pulls in a lot of stuff, and it's only for unit tests. if bld.env.DEVELOPER_MODE: ccan_module(bld, 'container_of', 'ccan-check_type') ccan_module(bld, 'htable', 'ccan-compiler') ccan_module(bld, 'list', 'ccan-container_of') ccan_module(bld, 'time') ccan_module(bld, 'tcon') ccan_module(bld, 'tlist', 'ccan-list ccan-tcon') ccan_module(bld, 'failtest', ''' ccan-err ccan-hash ccan-htable ccan-list ccan-read_write_all ccan-str ccan-time execinfo ''') # This is the complete CCAN collection as one group. 
bld.SAMBA_LIBRARY('ccan', source='', deps=bld.env.CCAN_MODS, private_library=True, grouping_library=True) ntdb-1.0/lib/replace/000077500000000000000000000000001224151530700144625ustar00rootroot00000000000000ntdb-1.0/lib/replace/.checker_innocent000066400000000000000000000002461224151530700177660ustar00rootroot00000000000000>>>MISTAKE21_create_files_6a9e68ada99a97cb >>>MISTAKE21_os2_delete_9b2bfa7f38711d09 >>>MISTAKE21_os2_delete_2fcc29aaa99a97cb >>>SECURITY2_os2_delete_9b2bfa7f1c9396ca ntdb-1.0/lib/replace/Makefile000066400000000000000000000015031224151530700161210ustar00rootroot00000000000000# simple makefile wrapper to run waf WAF=WAF_MAKE=1 PATH=buildtools/bin:../../buildtools/bin:$$PATH waf all: $(WAF) build install: $(WAF) install uninstall: $(WAF) uninstall test: $(WAF) test $(TEST_OPTIONS) testenv: $(WAF) test --testenv $(TEST_OPTIONS) quicktest: $(WAF) test --quick $(TEST_OPTIONS) dist: touch .tmplock WAFLOCK=.tmplock $(WAF) dist distcheck: touch .tmplock WAFLOCK=.tmplock $(WAF) distcheck clean: $(WAF) clean distclean: $(WAF) distclean reconfigure: configure $(WAF) reconfigure show_waf_options: $(WAF) --help # some compatibility make targets everything: all testsuite: all check: test torture: all # this should do an install as well, once install is finished installcheck: test etags: $(WAF) etags ctags: $(WAF) ctags bin/%:: FORCE $(WAF) --targets=`basename $@` FORCE: ntdb-1.0/lib/replace/README000066400000000000000000000030401224151530700153370ustar00rootroot00000000000000This subsystem ensures that we can always use a certain core set of functions and types, that are either provided by the OS or by replacement functions / definitions in this subsystem. The aim is to try to stick to POSIX functions in here as much as possible. Convenience functions that are available on no platform at all belong in other subsystems (such as LIBUTIL). 
The following functions are guaranteed: ftruncate strlcpy strlcat mktime rename initgroups memmove strdup setlinebuf vsyslog timegm setenv unsetenv strndup strnlen waitpid seteuid setegid asprintf snprintf vasprintf vsnprintf opendir readdir telldir seekdir clock_gettime closedir dlopen dlclose dlsym dlerror chroot bzero strerror errno mkdtemp mkstemp (a secure one!) pread pwrite chown lchown readline (the library) inet_ntoa inet_ntop inet_pton inet_aton strtoll strtoull socketpair strptime getaddrinfo freeaddrinfo getnameinfo gai_strerror getifaddrs freeifaddrs utime utimes dup2 link readlink symlink realpath poll setproctitle Types: bool socklen_t uint{8,16,32,64}_t int{8,16,32,64}_t intptr_t sig_atomic_t blksize_t blkcnt_t Constants: PATH_NAME_MAX UINT{16,32,64}_MAX INT32_MAX RTLD_LAZY HOST_NAME_MAX UINT16_MAX UINT32_MAX UINT64_MAX CHAR_BIT Macros: va_copy __FUNCTION__ __FILE__ __LINE__ __LINESTR__ __location__ __STRING __STRINGSTRING MIN MAX QSORT_CAST ZERO_STRUCT ZERO_STRUCTP ZERO_STRUCTPN ZERO_ARRAY ARRAY_SIZE PTR_DIFF Headers: stdint.h stdbool.h Optional C keywords: volatile Prerequisites: memset (for bzero) syslog (for vsyslog) mktemp (for mkstemp and mkdtemp) ntdb-1.0/lib/replace/configure000077500000000000000000000006501224151530700163720ustar00rootroot00000000000000#!/bin/sh PREVPATH=`dirname $0` if [ -f $PREVPATH/../../buildtools/bin/waf ]; then WAF=../../buildtools/bin/waf elif [ -f $PREVPATH/buildtools/bin/waf ]; then WAF=./buildtools/bin/waf else echo "replace: Unable to find waf" exit 1 fi # using JOBS=1 gives maximum compatibility with # systems like AIX which have broken threading in python JOBS=1 export JOBS cd . 
|| exit 1 $WAF configure "$@" || exit 1 cd $PREVPATH ntdb-1.0/lib/replace/crypt.c000066400000000000000000000534611224151530700160000ustar00rootroot00000000000000/* This bit of code was derived from the UFC-crypt package which carries the following copyright Modified for use by Samba by Andrew Tridgell, October 1994 Note that this routine is only faster on some machines. Under Linux 1.1.51 libc 4.5.26 I actually found this routine to be slightly slower. Under SunOS I found a huge speedup by using these routines (a factor of 20 or so) Warning: I've had a report from Steve Kennedy that this crypt routine may sometimes get the wrong answer. Only use UFC_CRYT if you really need it. */ #include "replace.h" #ifndef HAVE_CRYPT /* * UFC-crypt: ultra fast crypt(3) implementation * * Copyright (C) 1991-1998, Free Software Foundation, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . * * @(#)crypt_util.c 2.31 02/08/92 * * Support routines * */ #ifndef long32 #define long32 int32_t #endif #ifndef long64 #define long64 int64_t #endif #ifndef ufc_long #define ufc_long unsigned #endif #ifndef _UFC_64_ #define _UFC_32_ #endif /* * Permutation done once on the 56 bit * key derived from the original 8 byte ASCII key. 
*/ static int pc1[56] = { 57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 28, 20, 12, 4 }; /* * How much to rotate each 28 bit half of the pc1 permutated * 56 bit key before using pc2 to give the i' key */ static int rots[16] = { 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1 }; /* * Permutation giving the key * of the i' DES round */ static int pc2[48] = { 14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32 }; /* * The E expansion table which selects * bits from the 32 bit intermediate result. */ static int esel[48] = { 32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1 }; static int e_inverse[64]; /* * Permutation done on the * result of sbox lookups */ static int perm32[32] = { 16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25 }; /* * The sboxes */ static int sbox[8][4][16]= { { { 14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7 }, { 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8 }, { 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0 }, { 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13 } }, { { 15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10 }, { 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5 }, { 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15 }, { 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9 } }, { { 10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8 }, { 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1 }, { 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7 }, { 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12 } }, { { 7, 13, 14, 3, 0, 
6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15 }, { 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9 }, { 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4 }, { 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14 } }, { { 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9 }, { 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6 }, { 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14 }, { 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3 } }, { { 12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11 }, { 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8 }, { 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6 }, { 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13 } }, { { 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1 }, { 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6 }, { 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2 }, { 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12 } }, { { 13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7 }, { 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2 }, { 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8 }, { 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11 } } }; /* * This is the final * permutation matrix */ static int final_perm[64] = { 40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31, 38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29, 36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27, 34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25 }; /* * The 16 DES keys in BITMASK format */ #ifdef _UFC_32_ long32 _ufc_keytab[16][2]; #endif #ifdef _UFC_64_ long64 _ufc_keytab[16]; #endif #define ascii_to_bin(c) ((c)>='a'?(c-59):(c)>='A'?((c)-53):(c)-'.') #define bin_to_ascii(c) ((c)>=38?((c)-38+'a'):(c)>=12?((c)-12+'A'):(c)+'.') /* Macro to set a bit (0..23) */ #define BITMASK(i) ( (1<<(11-(i)%12+3)) << ((i)<12?16:0) ) /* * sb arrays: * * Workhorses of the inner loop of the DES implementation. 
* They do sbox lookup, shifting of this value, 32 bit * permutation and E permutation for the next round. * * Kept in 'BITMASK' format. */ #ifdef _UFC_32_ long32 _ufc_sb0[8192], _ufc_sb1[8192], _ufc_sb2[8192], _ufc_sb3[8192]; static long32 *sb[4] = {_ufc_sb0, _ufc_sb1, _ufc_sb2, _ufc_sb3}; #endif #ifdef _UFC_64_ long64 _ufc_sb0[4096], _ufc_sb1[4096], _ufc_sb2[4096], _ufc_sb3[4096]; static long64 *sb[4] = {_ufc_sb0, _ufc_sb1, _ufc_sb2, _ufc_sb3}; #endif /* * eperm32tab: do 32 bit permutation and E selection * * The first index is the byte number in the 32 bit value to be permuted * - second - is the value of this byte * - third - selects the two 32 bit values * * The table is used and generated internally in init_des to speed it up */ static ufc_long eperm32tab[4][256][2]; /* * do_pc1: permform pc1 permutation in the key schedule generation. * * The first index is the byte number in the 8 byte ASCII key * - second - - the two 28 bits halfs of the result * - third - selects the 7 bits actually used of each byte * * The result is kept with 28 bit per 32 bit with the 4 most significant * bits zero. */ static ufc_long do_pc1[8][2][128]; /* * do_pc2: permform pc2 permutation in the key schedule generation. * * The first index is the septet number in the two 28 bit intermediate values * - second - - - septet values * * Knowledge of the structure of the pc2 permutation is used. * * The result is kept with 28 bit per 32 bit with the 4 most significant * bits zero. */ static ufc_long do_pc2[8][128]; /* * efp: undo an extra e selection and do final * permutation giving the DES result. * * Invoked 6 bit a time on two 48 bit values * giving two 32 bit longs. 
*/ static ufc_long efp[16][64][2]; static unsigned char bytemask[8] = { 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01 }; static ufc_long longmask[32] = { 0x80000000, 0x40000000, 0x20000000, 0x10000000, 0x08000000, 0x04000000, 0x02000000, 0x01000000, 0x00800000, 0x00400000, 0x00200000, 0x00100000, 0x00080000, 0x00040000, 0x00020000, 0x00010000, 0x00008000, 0x00004000, 0x00002000, 0x00001000, 0x00000800, 0x00000400, 0x00000200, 0x00000100, 0x00000080, 0x00000040, 0x00000020, 0x00000010, 0x00000008, 0x00000004, 0x00000002, 0x00000001 }; /* * Silly rewrite of 'bzero'. I do so * because some machines don't have * bzero and some don't have memset. */ static void clearmem(char *start, int cnt) { while(cnt--) *start++ = '\0'; } static int initialized = 0; /* lookup a 6 bit value in sbox */ #define s_lookup(i,s) sbox[(i)][(((s)>>4) & 0x2)|((s) & 0x1)][((s)>>1) & 0xf]; /* * Initialize unit - may be invoked directly * by fcrypt users. */ static void ufc_init_des(void) { int comes_from_bit; int bit, sg; ufc_long j; ufc_long mask1, mask2; /* * Create the do_pc1 table used * to affect pc1 permutation * when generating keys */ for(bit = 0; bit < 56; bit++) { comes_from_bit = pc1[bit] - 1; mask1 = bytemask[comes_from_bit % 8 + 1]; mask2 = longmask[bit % 28 + 4]; for(j = 0; j < 128; j++) { if(j & mask1) do_pc1[comes_from_bit / 8][bit / 28][j] |= mask2; } } /* * Create the do_pc2 table used * to affect pc2 permutation when * generating keys */ for(bit = 0; bit < 48; bit++) { comes_from_bit = pc2[bit] - 1; mask1 = bytemask[comes_from_bit % 7 + 1]; mask2 = BITMASK(bit % 24); for(j = 0; j < 128; j++) { if(j & mask1) do_pc2[comes_from_bit / 7][j] |= mask2; } } /* * Now generate the table used to do combined * 32 bit permutation and e expansion * * We use it because we have to permute 16384 32 bit * longs into 48 bit in order to initialize sb. * * Looping 48 rounds per permutation becomes * just too slow... 
* */ clearmem((char*)eperm32tab, sizeof(eperm32tab)); for(bit = 0; bit < 48; bit++) { ufc_long inner_mask1,comes_from; comes_from = perm32[esel[bit]-1]-1; inner_mask1 = bytemask[comes_from % 8]; for(j = 256; j--;) { if(j & inner_mask1) eperm32tab[comes_from / 8][j][bit / 24] |= BITMASK(bit % 24); } } /* * Create the sb tables: * * For each 12 bit segment of an 48 bit intermediate * result, the sb table precomputes the two 4 bit * values of the sbox lookups done with the two 6 * bit halves, shifts them to their proper place, * sends them through perm32 and finally E expands * them so that they are ready for the next * DES round. * */ for(sg = 0; sg < 4; sg++) { int j1, j2; int s1, s2; for(j1 = 0; j1 < 64; j1++) { s1 = s_lookup(2 * sg, j1); for(j2 = 0; j2 < 64; j2++) { ufc_long to_permute, inx; s2 = s_lookup(2 * sg + 1, j2); to_permute = ((s1 << 4) | s2) << (24 - 8 * sg); #ifdef _UFC_32_ inx = ((j1 << 6) | j2) << 1; sb[sg][inx ] = eperm32tab[0][(to_permute >> 24) & 0xff][0]; sb[sg][inx+1] = eperm32tab[0][(to_permute >> 24) & 0xff][1]; sb[sg][inx ] |= eperm32tab[1][(to_permute >> 16) & 0xff][0]; sb[sg][inx+1] |= eperm32tab[1][(to_permute >> 16) & 0xff][1]; sb[sg][inx ] |= eperm32tab[2][(to_permute >> 8) & 0xff][0]; sb[sg][inx+1] |= eperm32tab[2][(to_permute >> 8) & 0xff][1]; sb[sg][inx ] |= eperm32tab[3][(to_permute) & 0xff][0]; sb[sg][inx+1] |= eperm32tab[3][(to_permute) & 0xff][1]; #endif #ifdef _UFC_64_ inx = ((j1 << 6) | j2); sb[sg][inx] = ((long64)eperm32tab[0][(to_permute >> 24) & 0xff][0] << 32) | (long64)eperm32tab[0][(to_permute >> 24) & 0xff][1]; sb[sg][inx] |= ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) | (long64)eperm32tab[1][(to_permute >> 16) & 0xff][1]; sb[sg][inx] |= ((long64)eperm32tab[2][(to_permute >> 8) & 0xff][0] << 32) | (long64)eperm32tab[2][(to_permute >> 8) & 0xff][1]; sb[sg][inx] |= ((long64)eperm32tab[3][(to_permute) & 0xff][0] << 32) | (long64)eperm32tab[3][(to_permute) & 0xff][1]; #endif } } } /* * Create an inverse matrix 
for esel telling * where to plug out bits if undoing it */ for(bit=48; bit--;) { e_inverse[esel[bit] - 1 ] = bit; e_inverse[esel[bit] - 1 + 32] = bit + 48; } /* * create efp: the matrix used to * undo the E expansion and effect final permutation */ clearmem((char*)efp, sizeof efp); for(bit = 0; bit < 64; bit++) { int o_bit, o_long; ufc_long word_value, inner_mask1, inner_mask2; int comes_from_f_bit, comes_from_e_bit; int comes_from_word, bit_within_word; /* See where bit i belongs in the two 32 bit long's */ o_long = bit / 32; /* 0..1 */ o_bit = bit % 32; /* 0..31 */ /* * And find a bit in the e permutated value setting this bit. * * Note: the e selection may have selected the same bit several * times. By the initialization of e_inverse, we only look * for one specific instance. */ comes_from_f_bit = final_perm[bit] - 1; /* 0..63 */ comes_from_e_bit = e_inverse[comes_from_f_bit]; /* 0..95 */ comes_from_word = comes_from_e_bit / 6; /* 0..15 */ bit_within_word = comes_from_e_bit % 6; /* 0..5 */ inner_mask1 = longmask[bit_within_word + 26]; inner_mask2 = longmask[o_bit]; for(word_value = 64; word_value--;) { if(word_value & inner_mask1) efp[comes_from_word][word_value][o_long] |= inner_mask2; } } initialized++; } /* * Process the elements of the sb table permuting the * bits swapped in the expansion by the current salt. */ #ifdef _UFC_32_ static void shuffle_sb(long32 *k, ufc_long saltbits) { ufc_long j; long32 x; for(j=4096; j--;) { x = (k[0] ^ k[1]) & (long32)saltbits; *k++ ^= x; *k++ ^= x; } } #endif #ifdef _UFC_64_ static void shuffle_sb(long64 *k, ufc_long saltbits) { ufc_long j; long64 x; for(j=4096; j--;) { x = ((*k >> 32) ^ *k) & (long64)saltbits; *k++ ^= (x << 32) | x; } } #endif /* * Setup the unit for a new salt * Hopefully we'll not see a new salt in each crypt call. 
*/ static unsigned char current_salt[3] = "&&"; /* invalid value */ static ufc_long current_saltbits = 0; static int direction = 0; static void setup_salt(const char *s1) { ufc_long i, j, saltbits; const unsigned char *s2 = (const unsigned char *)s1; if(!initialized) ufc_init_des(); if(s2[0] == current_salt[0] && s2[1] == current_salt[1]) return; current_salt[0] = s2[0]; current_salt[1] = s2[1]; /* * This is the only crypt change to DES: * entries are swapped in the expansion table * according to the bits set in the salt. */ saltbits = 0; for(i = 0; i < 2; i++) { long c=ascii_to_bin(s2[i]); if(c < 0 || c > 63) c = 0; for(j = 0; j < 6; j++) { if((c >> j) & 0x1) saltbits |= BITMASK(6 * i + j); } } /* * Permute the sb table values * to reflect the changed e * selection table */ shuffle_sb(_ufc_sb0, current_saltbits ^ saltbits); shuffle_sb(_ufc_sb1, current_saltbits ^ saltbits); shuffle_sb(_ufc_sb2, current_saltbits ^ saltbits); shuffle_sb(_ufc_sb3, current_saltbits ^ saltbits); current_saltbits = saltbits; } static void ufc_mk_keytab(char *key) { ufc_long v1, v2, *k1; int i; #ifdef _UFC_32_ long32 v, *k2 = &_ufc_keytab[0][0]; #endif #ifdef _UFC_64_ long64 v, *k2 = &_ufc_keytab[0]; #endif v1 = v2 = 0; k1 = &do_pc1[0][0][0]; for(i = 8; i--;) { v1 |= k1[*key & 0x7f]; k1 += 128; v2 |= k1[*key++ & 0x7f]; k1 += 128; } for(i = 0; i < 16; i++) { k1 = &do_pc2[0][0]; v1 = (v1 << rots[i]) | (v1 >> (28 - rots[i])); v = k1[(v1 >> 21) & 0x7f]; k1 += 128; v |= k1[(v1 >> 14) & 0x7f]; k1 += 128; v |= k1[(v1 >> 7) & 0x7f]; k1 += 128; v |= k1[(v1 ) & 0x7f]; k1 += 128; #ifdef _UFC_32_ *k2++ = v; v = 0; #endif #ifdef _UFC_64_ v <<= 32; #endif v2 = (v2 << rots[i]) | (v2 >> (28 - rots[i])); v |= k1[(v2 >> 21) & 0x7f]; k1 += 128; v |= k1[(v2 >> 14) & 0x7f]; k1 += 128; v |= k1[(v2 >> 7) & 0x7f]; k1 += 128; v |= k1[(v2 ) & 0x7f]; *k2++ = v; } direction = 0; } /* * Undo an extra E selection and do final permutations */ ufc_long *_ufc_dofinalperm(ufc_long l1, ufc_long l2, ufc_long r1, ufc_long 
r2) { ufc_long v1, v2, x; static ufc_long ary[2]; x = (l1 ^ l2) & current_saltbits; l1 ^= x; l2 ^= x; x = (r1 ^ r2) & current_saltbits; r1 ^= x; r2 ^= x; v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3; v1 |= efp[15][ r2 & 0x3f][0]; v2 |= efp[15][ r2 & 0x3f][1]; v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1]; v1 |= efp[13][(r2 >>= 10) & 0x3f][0]; v2 |= efp[13][ r2 & 0x3f][1]; v1 |= efp[12][(r2 >>= 6) & 0x3f][0]; v2 |= efp[12][ r2 & 0x3f][1]; v1 |= efp[11][ r1 & 0x3f][0]; v2 |= efp[11][ r1 & 0x3f][1]; v1 |= efp[10][(r1 >>= 6) & 0x3f][0]; v2 |= efp[10][ r1 & 0x3f][1]; v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1]; v1 |= efp[ 8][(r1 >>= 6) & 0x3f][0]; v2 |= efp[ 8][ r1 & 0x3f][1]; v1 |= efp[ 7][ l2 & 0x3f][0]; v2 |= efp[ 7][ l2 & 0x3f][1]; v1 |= efp[ 6][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 6][ l2 & 0x3f][1]; v1 |= efp[ 5][(l2 >>= 10) & 0x3f][0]; v2 |= efp[ 5][ l2 & 0x3f][1]; v1 |= efp[ 4][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 4][ l2 & 0x3f][1]; v1 |= efp[ 3][ l1 & 0x3f][0]; v2 |= efp[ 3][ l1 & 0x3f][1]; v1 |= efp[ 2][(l1 >>= 6) & 0x3f][0]; v2 |= efp[ 2][ l1 & 0x3f][1]; v1 |= efp[ 1][(l1 >>= 10) & 0x3f][0]; v2 |= efp[ 1][ l1 & 0x3f][1]; v1 |= efp[ 0][(l1 >>= 6) & 0x3f][0]; v2 |= efp[ 0][ l1 & 0x3f][1]; ary[0] = v1; ary[1] = v2; return ary; } /* * crypt only: convert from 64 bit to 11 bit ASCII * prefixing with the salt */ static char *output_conversion(ufc_long v1, ufc_long v2, const char *salt) { static char outbuf[14]; int i, s; outbuf[0] = salt[0]; outbuf[1] = salt[1] ? 
salt[1] : salt[0]; for(i = 0; i < 5; i++) outbuf[i + 2] = bin_to_ascii((v1 >> (26 - 6 * i)) & 0x3f); s = (v2 & 0xf) << 2; v2 = (v2 >> 2) | ((v1 & 0x3) << 30); for(i = 5; i < 10; i++) outbuf[i + 2] = bin_to_ascii((v2 >> (56 - 6 * i)) & 0x3f); outbuf[12] = bin_to_ascii(s); outbuf[13] = 0; return outbuf; } /* * UNIX crypt function */ static ufc_long *_ufc_doit(ufc_long , ufc_long, ufc_long, ufc_long, ufc_long); char *ufc_crypt(const char *key,const char *salt) { ufc_long *s; char ktab[9]; /* * Hack DES tables according to salt */ setup_salt(salt); /* * Setup key schedule */ clearmem(ktab, sizeof ktab); strncpy(ktab, key, 8); ufc_mk_keytab(ktab); /* * Go for the 25 DES encryptions */ s = _ufc_doit((ufc_long)0, (ufc_long)0, (ufc_long)0, (ufc_long)0, (ufc_long)25); /* * And convert back to 6 bit ASCII */ return output_conversion(s[0], s[1], salt); } #ifdef _UFC_32_ /* * 32 bit version */ extern long32 _ufc_keytab[16][2]; extern long32 _ufc_sb0[], _ufc_sb1[], _ufc_sb2[], _ufc_sb3[]; #define SBA(sb, v) (*(long32*)((char*)(sb)+(v))) static ufc_long *_ufc_doit(ufc_long l1, ufc_long l2, ufc_long r1, ufc_long r2, ufc_long itr) { int i; long32 s, *k; while(itr--) { k = &_ufc_keytab[0][0]; for(i=8; i--; ) { s = *k++ ^ r1; l1 ^= SBA(_ufc_sb1, s & 0xffff); l2 ^= SBA(_ufc_sb1, (s & 0xffff)+4); l1 ^= SBA(_ufc_sb0, s >>= 16); l2 ^= SBA(_ufc_sb0, (s) +4); s = *k++ ^ r2; l1 ^= SBA(_ufc_sb3, s & 0xffff); l2 ^= SBA(_ufc_sb3, (s & 0xffff)+4); l1 ^= SBA(_ufc_sb2, s >>= 16); l2 ^= SBA(_ufc_sb2, (s) +4); s = *k++ ^ l1; r1 ^= SBA(_ufc_sb1, s & 0xffff); r2 ^= SBA(_ufc_sb1, (s & 0xffff)+4); r1 ^= SBA(_ufc_sb0, s >>= 16); r2 ^= SBA(_ufc_sb0, (s) +4); s = *k++ ^ l2; r1 ^= SBA(_ufc_sb3, s & 0xffff); r2 ^= SBA(_ufc_sb3, (s & 0xffff)+4); r1 ^= SBA(_ufc_sb2, s >>= 16); r2 ^= SBA(_ufc_sb2, (s) +4); } s=l1; l1=r1; r1=s; s=l2; l2=r2; r2=s; } return _ufc_dofinalperm(l1, l2, r1, r2); } #endif #ifdef _UFC_64_ /* * 64 bit version */ extern long64 _ufc_keytab[16]; extern long64 _ufc_sb0[], _ufc_sb1[], 
_ufc_sb2[], _ufc_sb3[]; #define SBA(sb, v) (*(long64*)((char*)(sb)+(v))) static ufc_long *_ufc_doit(ufc_long l1, ufc_long l2, ufc_long r1, ufc_long r2, ufc_long itr) { int i; long64 l, r, s, *k; l = (((long64)l1) << 32) | ((long64)l2); r = (((long64)r1) << 32) | ((long64)r2); while(itr--) { k = &_ufc_keytab[0]; for(i=8; i--; ) { s = *k++ ^ r; l ^= SBA(_ufc_sb3, (s >> 0) & 0xffff); l ^= SBA(_ufc_sb2, (s >> 16) & 0xffff); l ^= SBA(_ufc_sb1, (s >> 32) & 0xffff); l ^= SBA(_ufc_sb0, (s >> 48) & 0xffff); s = *k++ ^ l; r ^= SBA(_ufc_sb3, (s >> 0) & 0xffff); r ^= SBA(_ufc_sb2, (s >> 16) & 0xffff); r ^= SBA(_ufc_sb1, (s >> 32) & 0xffff); r ^= SBA(_ufc_sb0, (s >> 48) & 0xffff); } s=l; l=r; r=s; } l1 = l >> 32; l2 = l & 0xffffffff; r1 = r >> 32; r2 = r & 0xffffffff; return _ufc_dofinalperm(l1, l2, r1, r2); } #endif #else int ufc_dummy_procedure(void); int ufc_dummy_procedure(void) {return 0;} #endif ntdb-1.0/lib/replace/dlfcn.c000066400000000000000000000035431224151530700157210ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Samba system utilities Copyright (C) Andrew Tridgell 1992-1998 Copyright (C) Jeremy Allison 1998-2002 Copyright (C) Jelmer Vernooij 2006 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include "replace.h" #ifdef HAVE_DL_H #include #endif #ifndef HAVE_DLOPEN #ifdef DLOPEN_TAKES_UNSIGNED_FLAGS void *rep_dlopen(const char *name, unsigned int flags) #else void *rep_dlopen(const char *name, int flags) #endif { #ifdef HAVE_SHL_LOAD if (name == NULL) return PROG_HANDLE; return (void *)shl_load(name, flags, 0); #else return NULL; #endif } #endif #ifndef HAVE_DLSYM void *rep_dlsym(void *handle, const char *symbol) { #ifdef HAVE_SHL_FINDSYM void *sym_addr; if (!shl_findsym((shl_t *)&handle, symbol, TYPE_UNDEFINED, &sym_addr)) return sym_addr; #endif return NULL; } #endif #ifndef HAVE_DLERROR char *rep_dlerror(void) { return "dynamic loading of objects not supported on this platform"; } #endif #ifndef HAVE_DLCLOSE int rep_dlclose(void *handle) { #ifdef HAVE_SHL_CLOSE return shl_unload((shl_t)handle); #else return 0; #endif } #endif ntdb-1.0/lib/replace/getaddrinfo.c000066400000000000000000000246371224151530700171300ustar00rootroot00000000000000/* PostgreSQL Database Management System (formerly known as Postgres, then as Postgres95) Portions Copyright (c) 1996-2005, The PostgreSQL Global Development Group Portions Copyright (c) 1994, The Regents of the University of California Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. */ /*------------------------------------------------------------------------- * * getaddrinfo.c * Support getaddrinfo() on platforms that don't have it. * * We also supply getnameinfo() here, assuming that the platform will have * it if and only if it has getaddrinfo(). If this proves false on some * platform, we'll need to split this file and provide a separate configure * test for getnameinfo(). * * Copyright (c) 2003-2007, PostgreSQL Global Development Group * * Copyright (C) 2007 Jeremy Allison. * Modified to return multiple IPv4 addresses for Samba. * *------------------------------------------------------------------------- */ #include "replace.h" #include "system/network.h" #ifndef SMB_MALLOC #define SMB_MALLOC(s) malloc(s) #endif #ifndef SMB_STRDUP #define SMB_STRDUP(s) strdup(s) #endif static int check_hostent_err(struct hostent *hp) { if (!hp) { switch (h_errno) { case HOST_NOT_FOUND: case NO_DATA: return EAI_NONAME; case TRY_AGAIN: return EAI_AGAIN; case NO_RECOVERY: default: return EAI_FAIL; } } if (!hp->h_name || hp->h_addrtype != AF_INET) { return EAI_FAIL; } return 0; } static char *canon_name_from_hostent(struct hostent *hp, int *perr) { char *ret = NULL; *perr = check_hostent_err(hp); if (*perr) { return NULL; } ret = SMB_STRDUP(hp->h_name); if (!ret) { *perr = EAI_MEMORY; } return ret; } static char *get_my_canon_name(int *perr) { char name[HOST_NAME_MAX+1]; if (gethostname(name, HOST_NAME_MAX) == -1) { *perr = EAI_FAIL; return NULL; } /* Ensure null termination. 
*/ name[HOST_NAME_MAX] = '\0'; return canon_name_from_hostent(gethostbyname(name), perr); } static char *get_canon_name_from_addr(struct in_addr ip, int *perr) { return canon_name_from_hostent( gethostbyaddr(&ip, sizeof(ip), AF_INET), perr); } static struct addrinfo *alloc_entry(const struct addrinfo *hints, struct in_addr ip, unsigned short port) { struct sockaddr_in *psin = NULL; struct addrinfo *ai = SMB_MALLOC(sizeof(*ai)); if (!ai) { return NULL; } memset(ai, '\0', sizeof(*ai)); psin = SMB_MALLOC(sizeof(*psin)); if (!psin) { free(ai); return NULL; } memset(psin, '\0', sizeof(*psin)); psin->sin_family = AF_INET; psin->sin_port = htons(port); psin->sin_addr = ip; ai->ai_flags = 0; ai->ai_family = AF_INET; ai->ai_socktype = hints->ai_socktype; ai->ai_protocol = hints->ai_protocol; ai->ai_addrlen = sizeof(*psin); ai->ai_addr = (struct sockaddr *) psin; ai->ai_canonname = NULL; ai->ai_next = NULL; return ai; } /* * get address info for a single ipv4 address. * * Bugs: - servname can only be a number, not text. */ static int getaddr_info_single_addr(const char *service, uint32_t addr, const struct addrinfo *hints, struct addrinfo **res) { struct addrinfo *ai = NULL; struct in_addr ip; unsigned short port = 0; if (service) { port = (unsigned short)atoi(service); } ip.s_addr = htonl(addr); ai = alloc_entry(hints, ip, port); if (!ai) { return EAI_MEMORY; } /* If we're asked for the canonical name, * make sure it returns correctly. */ if (!(hints->ai_flags & AI_NUMERICSERV) && hints->ai_flags & AI_CANONNAME) { int err; if (addr == INADDR_LOOPBACK || addr == INADDR_ANY) { ai->ai_canonname = get_my_canon_name(&err); } else { ai->ai_canonname = get_canon_name_from_addr(ip,&err); } if (ai->ai_canonname == NULL) { freeaddrinfo(ai); return err; } } *res = ai; return 0; } /* * get address info for multiple ipv4 addresses. * * Bugs: - servname can only be a number, not text. 
*/ static int getaddr_info_name(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res) { struct addrinfo *listp = NULL, *prevp = NULL; char **pptr = NULL; int err; struct hostent *hp = NULL; unsigned short port = 0; if (service) { port = (unsigned short)atoi(service); } hp = gethostbyname(node); err = check_hostent_err(hp); if (err) { return err; } for(pptr = hp->h_addr_list; *pptr; pptr++) { struct in_addr ip = *(struct in_addr *)*pptr; struct addrinfo *ai = alloc_entry(hints, ip, port); if (!ai) { freeaddrinfo(listp); return EAI_MEMORY; } if (!listp) { listp = ai; prevp = ai; ai->ai_canonname = SMB_STRDUP(hp->h_name); if (!ai->ai_canonname) { freeaddrinfo(listp); return EAI_MEMORY; } } else { prevp->ai_next = ai; prevp = ai; } } *res = listp; return 0; } /* * get address info for ipv4 sockets. * * Bugs: - servname can only be a number, not text. */ int rep_getaddrinfo(const char *node, const char *service, const struct addrinfo * hintp, struct addrinfo ** res) { struct addrinfo hints; /* Setup the hints struct. 
*/ if (hintp == NULL) { memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_INET; hints.ai_socktype = SOCK_STREAM; } else { memcpy(&hints, hintp, sizeof(hints)); } if (hints.ai_family != AF_INET && hints.ai_family != AF_UNSPEC) { return EAI_FAMILY; } if (hints.ai_socktype == 0) { hints.ai_socktype = SOCK_STREAM; } if (!node && !service) { return EAI_NONAME; } if (node) { if (node[0] == '\0') { return getaddr_info_single_addr(service, INADDR_ANY, &hints, res); } else if (hints.ai_flags & AI_NUMERICHOST) { struct in_addr ip; if (!inet_aton(node, &ip)) { return EAI_FAIL; } return getaddr_info_single_addr(service, ntohl(ip.s_addr), &hints, res); } else { return getaddr_info_name(node, service, &hints, res); } } else if (hints.ai_flags & AI_PASSIVE) { return getaddr_info_single_addr(service, INADDR_ANY, &hints, res); } return getaddr_info_single_addr(service, INADDR_LOOPBACK, &hints, res); } void rep_freeaddrinfo(struct addrinfo *res) { struct addrinfo *next = NULL; for (;res; res = next) { next = res->ai_next; if (res->ai_canonname) { free(res->ai_canonname); } if (res->ai_addr) { free(res->ai_addr); } free(res); } } const char *rep_gai_strerror(int errcode) { #ifdef HAVE_HSTRERROR int hcode; switch (errcode) { case EAI_NONAME: hcode = HOST_NOT_FOUND; break; case EAI_AGAIN: hcode = TRY_AGAIN; break; case EAI_FAIL: default: hcode = NO_RECOVERY; break; } return hstrerror(hcode); #else /* !HAVE_HSTRERROR */ switch (errcode) { case EAI_NONAME: return "Unknown host"; case EAI_AGAIN: return "Host name lookup failure"; #ifdef EAI_BADFLAGS case EAI_BADFLAGS: return "Invalid argument"; #endif #ifdef EAI_FAMILY case EAI_FAMILY: return "Address family not supported"; #endif #ifdef EAI_MEMORY case EAI_MEMORY: return "Not enough memory"; #endif #ifdef EAI_NODATA case EAI_NODATA: return "No host data of that type was found"; #endif #ifdef EAI_SERVICE case EAI_SERVICE: return "Class type not found"; #endif #ifdef EAI_SOCKTYPE case EAI_SOCKTYPE: return "Socket type not supported"; 
#endif default: return "Unknown server error"; } #endif /* HAVE_HSTRERROR */ } static int gethostnameinfo(const struct sockaddr *sa, char *node, size_t nodelen, int flags) { int ret = -1; char *p = NULL; if (!(flags & NI_NUMERICHOST)) { struct hostent *hp = gethostbyaddr( &((struct sockaddr_in *)sa)->sin_addr, sizeof(struct in_addr), sa->sa_family); ret = check_hostent_err(hp); if (ret == 0) { /* Name looked up successfully. */ ret = snprintf(node, nodelen, "%s", hp->h_name); if (ret < 0 || (size_t)ret >= nodelen) { return EAI_MEMORY; } if (flags & NI_NOFQDN) { p = strchr(node,'.'); if (p) { *p = '\0'; } } return 0; } if (flags & NI_NAMEREQD) { /* If we require a name and didn't get one, * automatically fail. */ return ret; } /* Otherwise just fall into the numeric host code... */ } p = inet_ntoa(((struct sockaddr_in *)sa)->sin_addr); ret = snprintf(node, nodelen, "%s", p); if (ret < 0 || (size_t)ret >= nodelen) { return EAI_MEMORY; } return 0; } static int getservicenameinfo(const struct sockaddr *sa, char *service, size_t servicelen, int flags) { int ret = -1; int port = ntohs(((struct sockaddr_in *)sa)->sin_port); if (!(flags & NI_NUMERICSERV)) { struct servent *se = getservbyport( port, (flags & NI_DGRAM) ? "udp" : "tcp"); if (se && se->s_name) { /* Service name looked up successfully. */ ret = snprintf(service, servicelen, "%s", se->s_name); if (ret < 0 || (size_t)ret >= servicelen) { return EAI_MEMORY; } return 0; } /* Otherwise just fall into the numeric service code... */ } ret = snprintf(service, servicelen, "%d", port); if (ret < 0 || (size_t)ret >= servicelen) { return EAI_MEMORY; } return 0; } /* * Convert an ipv4 address to a hostname. * * Bugs: - No IPv6 support. */ int rep_getnameinfo(const struct sockaddr *sa, socklen_t salen, char *node, size_t nodelen, char *service, size_t servicelen, int flags) { /* Invalid arguments. 
*/ if (sa == NULL || (node == NULL && service == NULL)) { return EAI_FAIL; } if (sa->sa_family != AF_INET) { return EAI_FAIL; } if (salen < sizeof(struct sockaddr_in)) { return EAI_FAIL; } if (node) { return gethostnameinfo(sa, node, nodelen, flags); } if (service) { return getservicenameinfo(sa, service, servicelen, flags); } return 0; } ntdb-1.0/lib/replace/getaddrinfo.h000066400000000000000000000061411224151530700171230ustar00rootroot00000000000000/* PostgreSQL Database Management System (formerly known as Postgres, then as Postgres95) Portions Copyright (c) 1996-2005, The PostgreSQL Global Development Group Portions Copyright (c) 1994, The Regents of the University of California Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. */ /*------------------------------------------------------------------------- * * getaddrinfo.h * Support getaddrinfo() on platforms that don't have it. * * Note: we use our own routines on platforms that don't HAVE_STRUCT_ADDRINFO, * whether or not the library routine getaddrinfo() can be found. 
This * policy is needed because on some platforms a manually installed libbind.a * may provide getaddrinfo(), yet the system headers may not provide the * struct definitions needed to call it. To avoid conflict with the libbind * definition in such cases, we rename our routines to pg_xxx() via macros. * in lib/replace we use rep_xxx() * This code will also work on platforms where struct addrinfo is defined * in the system headers but no getaddrinfo() can be located. * * Copyright (c) 2003-2007, PostgreSQL Global Development Group * *------------------------------------------------------------------------- */ #ifndef GETADDRINFO_H #define GETADDRINFO_H #ifndef HAVE_GETADDRINFO /* Rename private copies per comments above */ #ifdef getaddrinfo #undef getaddrinfo #endif #define getaddrinfo rep_getaddrinfo #define HAVE_GETADDRINFO #ifdef freeaddrinfo #undef freeaddrinfo #endif #define freeaddrinfo rep_freeaddrinfo #define HAVE_FREEADDRINFO #ifdef gai_strerror #undef gai_strerror #endif #define gai_strerror rep_gai_strerror #define HAVE_GAI_STRERROR #ifdef getnameinfo #undef getnameinfo #endif #define getnameinfo rep_getnameinfo #ifndef HAVE_GETNAMEINFO #define HAVE_GETNAMEINFO #endif extern int rep_getaddrinfo(const char *node, const char *service, const struct addrinfo * hints, struct addrinfo ** res); extern void rep_freeaddrinfo(struct addrinfo * res); extern const char *rep_gai_strerror(int errcode); extern int rep_getnameinfo(const struct sockaddr * sa, socklen_t salen, char *node, size_t nodelen, char *service, size_t servicelen, int flags); #endif /* HAVE_GETADDRINFO */ #endif /* GETADDRINFO_H */ ntdb-1.0/lib/replace/getifaddrs.c000066400000000000000000000205421224151530700167450ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Samba utility functions Copyright (C) Andrew Tridgell 1998 Copyright (C) Jeremy Allison 2007 Copyright (C) Jelmer Vernooij 2007 ** NOTE! The following LGPL license applies to the replace ** library. 
This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #define SOCKET_WRAPPER_NOT_REPLACE #include "replace.h" #include "system/network.h" #include #include #include #ifdef HAVE_SYS_TIME_H #include #endif #ifndef SIOCGIFCONF #ifdef HAVE_SYS_SOCKIO_H #include #endif #endif #ifdef HAVE_IFACE_GETIFADDRS #define _FOUND_IFACE_ANY #else void rep_freeifaddrs(struct ifaddrs *ifp) { if (ifp != NULL) { free(ifp->ifa_name); free(ifp->ifa_addr); free(ifp->ifa_netmask); free(ifp->ifa_dstaddr); freeifaddrs(ifp->ifa_next); free(ifp); } } static struct sockaddr *sockaddr_dup(struct sockaddr *sa) { struct sockaddr *ret; socklen_t socklen; #ifdef HAVE_SOCKADDR_SA_LEN socklen = sa->sa_len; #else socklen = sizeof(struct sockaddr_storage); #endif ret = calloc(1, socklen); if (ret == NULL) return NULL; memcpy(ret, sa, socklen); return ret; } #endif #if HAVE_IFACE_IFCONF /* this works for Linux 2.2, Solaris 2.5, SunOS4, HPUX 10.20, OSF1 V4.0, Ultrix 4.4, SCO Unix 3.2, IRIX 6.4 and FreeBSD 3.2. It probably also works on any BSD style system. 
*/ int rep_getifaddrs(struct ifaddrs **ifap) { struct ifconf ifc; char buff[8192]; int fd, i, n; struct ifreq *ifr=NULL; struct ifaddrs *curif; struct ifaddrs *lastif = NULL; *ifap = NULL; if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) { return -1; } ifc.ifc_len = sizeof(buff); ifc.ifc_buf = buff; if (ioctl(fd, SIOCGIFCONF, &ifc) != 0) { close(fd); return -1; } ifr = ifc.ifc_req; n = ifc.ifc_len / sizeof(struct ifreq); /* Loop through interfaces, looking for given IP address */ for (i=n-1; i>=0; i--) { if (ioctl(fd, SIOCGIFFLAGS, &ifr[i]) == -1) { freeifaddrs(*ifap); close(fd); return -1; } curif = calloc(1, sizeof(struct ifaddrs)); if (curif == NULL) { freeifaddrs(*ifap); close(fd); return -1; } curif->ifa_name = strdup(ifr[i].ifr_name); if (curif->ifa_name == NULL) { free(curif); freeifaddrs(*ifap); close(fd); return -1; } curif->ifa_flags = ifr[i].ifr_flags; curif->ifa_dstaddr = NULL; curif->ifa_data = NULL; curif->ifa_next = NULL; curif->ifa_addr = NULL; if (ioctl(fd, SIOCGIFADDR, &ifr[i]) != -1) { curif->ifa_addr = sockaddr_dup(&ifr[i].ifr_addr); if (curif->ifa_addr == NULL) { free(curif->ifa_name); free(curif); freeifaddrs(*ifap); close(fd); return -1; } } curif->ifa_netmask = NULL; if (ioctl(fd, SIOCGIFNETMASK, &ifr[i]) != -1) { curif->ifa_netmask = sockaddr_dup(&ifr[i].ifr_addr); if (curif->ifa_netmask == NULL) { if (curif->ifa_addr != NULL) { free(curif->ifa_addr); } free(curif->ifa_name); free(curif); freeifaddrs(*ifap); close(fd); return -1; } } if (lastif == NULL) { *ifap = curif; } else { lastif->ifa_next = curif; } lastif = curif; } close(fd); return 0; } #define _FOUND_IFACE_ANY #endif /* HAVE_IFACE_IFCONF */ #ifdef HAVE_IFACE_IFREQ #ifndef I_STR #include #endif /**************************************************************************** this should cover most of the streams based systems Thanks to Andrej.Borsenkow@mow.siemens.ru for several ideas in this code ****************************************************************************/ int 
rep_getifaddrs(struct ifaddrs **ifap) { struct ifreq ifreq; struct strioctl strioctl; char buff[8192]; int fd, i, n; struct ifreq *ifr=NULL; struct ifaddrs *curif; struct ifaddrs *lastif = NULL; *ifap = NULL; if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) { return -1; } strioctl.ic_cmd = SIOCGIFCONF; strioctl.ic_dp = buff; strioctl.ic_len = sizeof(buff); if (ioctl(fd, I_STR, &strioctl) < 0) { close(fd); return -1; } /* we can ignore the possible sizeof(int) here as the resulting number of interface structures won't change */ n = strioctl.ic_len / sizeof(struct ifreq); /* we will assume that the kernel returns the length as an int at the start of the buffer if the offered size is a multiple of the structure size plus an int */ if (n*sizeof(struct ifreq) + sizeof(int) == strioctl.ic_len) { ifr = (struct ifreq *)(buff + sizeof(int)); } else { ifr = (struct ifreq *)buff; } /* Loop through interfaces */ for (i = 0; iifa_next = curif; } strioctl.ic_cmd = SIOCGIFFLAGS; strioctl.ic_dp = (char *)&ifreq; strioctl.ic_len = sizeof(struct ifreq); if (ioctl(fd, I_STR, &strioctl) != 0) { freeifaddrs(*ifap); return -1; } curif->ifa_flags = ifreq.ifr_flags; strioctl.ic_cmd = SIOCGIFADDR; strioctl.ic_dp = (char *)&ifreq; strioctl.ic_len = sizeof(struct ifreq); if (ioctl(fd, I_STR, &strioctl) != 0) { freeifaddrs(*ifap); return -1; } curif->ifa_name = strdup(ifreq.ifr_name); curif->ifa_addr = sockaddr_dup(&ifreq.ifr_addr); curif->ifa_dstaddr = NULL; curif->ifa_data = NULL; curif->ifa_next = NULL; curif->ifa_netmask = NULL; strioctl.ic_cmd = SIOCGIFNETMASK; strioctl.ic_dp = (char *)&ifreq; strioctl.ic_len = sizeof(struct ifreq); if (ioctl(fd, I_STR, &strioctl) != 0) { freeifaddrs(*ifap); return -1; } curif->ifa_netmask = sockaddr_dup(&ifreq.ifr_addr); lastif = curif; } close(fd); return 0; } #define _FOUND_IFACE_ANY #endif /* HAVE_IFACE_IFREQ */ #ifdef HAVE_IFACE_AIX /**************************************************************************** this one is for AIX (tested on 4.2) 
****************************************************************************/ int rep_getifaddrs(struct ifaddrs **ifap) { char buff[8192]; int fd, i; struct ifconf ifc; struct ifreq *ifr=NULL; struct ifaddrs *curif; struct ifaddrs *lastif = NULL; *ifap = NULL; if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) { return -1; } ifc.ifc_len = sizeof(buff); ifc.ifc_buf = buff; if (ioctl(fd, SIOCGIFCONF, &ifc) != 0) { close(fd); return -1; } ifr = ifc.ifc_req; /* Loop through interfaces */ i = ifc.ifc_len; while (i > 0) { unsigned int inc; inc = ifr->ifr_addr.sa_len; if (ioctl(fd, SIOCGIFADDR, ifr) != 0) { freeaddrinfo(*ifap); return -1; } curif = calloc(1, sizeof(struct ifaddrs)); if (lastif == NULL) { *ifap = curif; } else { lastif->ifa_next = curif; } curif->ifa_name = strdup(ifr->ifr_name); curif->ifa_addr = sockaddr_dup(&ifr->ifr_addr); curif->ifa_dstaddr = NULL; curif->ifa_data = NULL; curif->ifa_netmask = NULL; curif->ifa_next = NULL; if (ioctl(fd, SIOCGIFFLAGS, ifr) != 0) { freeaddrinfo(*ifap); return -1; } curif->ifa_flags = ifr->ifr_flags; if (ioctl(fd, SIOCGIFNETMASK, ifr) != 0) { freeaddrinfo(*ifap); return -1; } curif->ifa_netmask = sockaddr_dup(&ifr->ifr_addr); lastif = curif; next: /* * Patch from Archie Cobbs (archie@whistle.com). The * addresses in the SIOCGIFCONF interface list have a * minimum size. Usually this doesn't matter, but if * your machine has tunnel interfaces, etc. that have * a zero length "link address", this does matter. 
*/ if (inc < sizeof(ifr->ifr_addr)) inc = sizeof(ifr->ifr_addr); inc += IFNAMSIZ; ifr = (struct ifreq*) (((char*) ifr) + inc); i -= inc; } close(fd); return 0; } #define _FOUND_IFACE_ANY #endif /* HAVE_IFACE_AIX */ #ifndef _FOUND_IFACE_ANY int rep_getifaddrs(struct ifaddrs **ifap) { errno = ENOSYS; return -1; } #endif ntdb-1.0/lib/replace/hdr_replace.h000066400000000000000000000001241224151530700171000ustar00rootroot00000000000000/* this is a replacement header for a missing system header */ #include "replace.h" ntdb-1.0/lib/replace/inet_aton.c000066400000000000000000000022321224151530700166050ustar00rootroot00000000000000/* * Unix SMB/CIFS implementation. * replacement functions * Copyright (C) Michael Adam 2008 * * ** NOTE! The following LGPL license applies to the replace * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #include "replace.h" #include "system/network.h" /** * We know that we have inet_pton from earlier libreplace checks. */ int rep_inet_aton(const char *src, struct in_addr *dst) { return (inet_pton(AF_INET, src, dst) > 0) ? 1 : 0; } ntdb-1.0/lib/replace/inet_ntoa.c000066400000000000000000000024751224151530700166160ustar00rootroot00000000000000/* * Unix SMB/CIFS implementation. 
* replacement routines for broken systems * Copyright (C) Andrew Tridgell 2003 * Copyright (C) Michael Adam 2008 * * ** NOTE! The following LGPL license applies to the replace * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #include "replace.h" #include "system/network.h" /** * NOTE: this is not thread safe, but it can't be, either * since it returns a pointer to static memory. */ char *rep_inet_ntoa(struct in_addr ip) { uint8_t *p = (uint8_t *)&ip.s_addr; static char buf[18]; slprintf(buf, 17, "%d.%d.%d.%d", (int)p[0], (int)p[1], (int)p[2], (int)p[3]); return buf; } ntdb-1.0/lib/replace/inet_ntop.c000066400000000000000000000116321224151530700166300ustar00rootroot00000000000000/* * Copyright (C) 1996-2001 Internet Software Consortium. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM * DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL * INTERNET SOFTWARE CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "replace.h" #include "system/network.h" #define NS_INT16SZ 2 #define NS_IN6ADDRSZ 16 /* * WARNING: Don't even consider trying to compile this on a system where * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. */ static const char *inet_ntop4(const unsigned char *src, char *dst, socklen_t size); #ifdef AF_INET6 static const char *inet_ntop6(const unsigned char *src, char *dst, socklen_t size); #endif /* char * * isc_net_ntop(af, src, dst, size) * convert a network format address to presentation format. * return: * pointer to presentation format address (`dst'), or NULL (see errno). * author: * Paul Vixie, 1996. */ const char * rep_inet_ntop(int af, const void *src, char *dst, socklen_t size) { switch (af) { case AF_INET: return (inet_ntop4(src, dst, size)); #ifdef AF_INET6 case AF_INET6: return (inet_ntop6(src, dst, size)); #endif default: errno = EAFNOSUPPORT; return (NULL); } /* NOTREACHED */ } /* const char * * inet_ntop4(src, dst, size) * format an IPv4 address * return: * `dst' (as a const) * notes: * (1) uses no statics * (2) takes a unsigned char* not an in_addr as input * author: * Paul Vixie, 1996. */ static const char * inet_ntop4(const unsigned char *src, char *dst, socklen_t size) { static const char *fmt = "%u.%u.%u.%u"; char tmp[sizeof "255.255.255.255"]; size_t len; len = snprintf(tmp, sizeof tmp, fmt, src[0], src[1], src[2], src[3]); if (len >= size) { errno = ENOSPC; return (NULL); } memcpy(dst, tmp, len + 1); return (dst); } /* const char * * isc_inet_ntop6(src, dst, size) * convert IPv6 binary address into presentation (printable) format * author: * Paul Vixie, 1996. 
*/ #ifdef AF_INET6 static const char * inet_ntop6(const unsigned char *src, char *dst, socklen_t size) { /* * Note that int32_t and int16_t need only be "at least" large enough * to contain a value of the specified size. On some systems, like * Crays, there is no such thing as an integer variable with 16 bits. * Keep this in mind if you think this function should have been coded * to use pointer overlays. All the world's not a VAX. */ char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"], *tp; struct { int base, len; } best, cur; unsigned int words[NS_IN6ADDRSZ / NS_INT16SZ]; int i, inc; /* * Preprocess: * Copy the input (bytewise) array into a wordwise array. * Find the longest run of 0x00's in src[] for :: shorthanding. */ memset(words, '\0', sizeof words); for (i = 0; i < NS_IN6ADDRSZ; i++) words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3)); best.base = -1; best.len = 0; cur.base = -1; cur.len = 0; for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) { if (words[i] == 0) { if (cur.base == -1) cur.base = i, cur.len = 1; else cur.len++; } else { if (cur.base != -1) { if (best.base == -1 || cur.len > best.len) best = cur; cur.base = -1; } } } if (cur.base != -1) { if (best.base == -1 || cur.len > best.len) best = cur; } if (best.base != -1 && best.len < 2) best.base = -1; /* * Format the result. */ tp = tmp; for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) { /* Are we inside the best run of 0x00's? */ if (best.base != -1 && i >= best.base && i < (best.base + best.len)) { if (i == best.base) *tp++ = ':'; continue; } /* Are we following an initial run of 0x00s or any real hex? */ if (i != 0) *tp++ = ':'; /* Is this address an encapsulated IPv4? */ if (i == 6 && best.base == 0 && (best.len == 6 || (best.len == 5 && words[5] == 0xffff))) { if (!inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp))) return (NULL); tp += strlen(tp); break; } inc = snprintf(tp, 5, "%x", words[i]); if (inc >= 5) { abort(); } tp += inc; } /* Was it a trailing run of 0x00's? 
*/ if (best.base != -1 && (best.base + best.len) == (NS_IN6ADDRSZ / NS_INT16SZ)) *tp++ = ':'; *tp++ = '\0'; /* * Check for overflow, copy, and we're done. */ if ((size_t)(tp - tmp) > size) { errno = ENOSPC; return (NULL); } memcpy(dst, tmp, tp - tmp); return (dst); } #endif /* AF_INET6 */ ntdb-1.0/lib/replace/inet_pton.c000066400000000000000000000120101224151530700166170ustar00rootroot00000000000000/* * Copyright (C) 1996-2001 Internet Software Consortium. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM * DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL * INTERNET SOFTWARE CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "replace.h" #include "system/network.h" #define NS_INT16SZ 2 #define NS_INADDRSZ 4 #define NS_IN6ADDRSZ 16 /* * WARNING: Don't even consider trying to compile this on a system where * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. */ static int inet_pton4(const char *src, unsigned char *dst); #ifdef AF_INET6 static int inet_pton6(const char *src, unsigned char *dst); #endif /* int * inet_pton(af, src, dst) * convert from presentation format (which usually means ASCII printable) * to network format (which is usually some kind of binary format). 
* return: * 1 if the address was valid for the specified address family * 0 if the address wasn't valid (`dst' is untouched in this case) * -1 if some other error occurred (`dst' is untouched in this case, too) * author: * Paul Vixie, 1996. */ int rep_inet_pton(int af, const char *src, void *dst) { switch (af) { case AF_INET: return (inet_pton4(src, dst)); #ifdef AF_INET6 case AF_INET6: return (inet_pton6(src, dst)); #endif default: errno = EAFNOSUPPORT; return (-1); } /* NOTREACHED */ } /* int * inet_pton4(src, dst) * like inet_aton() but without all the hexadecimal and shorthand. * return: * 1 if `src' is a valid dotted quad, else 0. * notice: * does not touch `dst' unless it's returning 1. * author: * Paul Vixie, 1996. */ static int inet_pton4(src, dst) const char *src; unsigned char *dst; { static const char digits[] = "0123456789"; int saw_digit, octets, ch; unsigned char tmp[NS_INADDRSZ], *tp; saw_digit = 0; octets = 0; *(tp = tmp) = 0; while ((ch = *src++) != '\0') { const char *pch; if ((pch = strchr(digits, ch)) != NULL) { unsigned int new = *tp * 10 + (pch - digits); if (new > 255) return (0); *tp = new; if (! saw_digit) { if (++octets > 4) return (0); saw_digit = 1; } } else if (ch == '.' && saw_digit) { if (octets == 4) return (0); *++tp = 0; saw_digit = 0; } else return (0); } if (octets < 4) return (0); memcpy(dst, tmp, NS_INADDRSZ); return (1); } /* int * inet_pton6(src, dst) * convert presentation level address to network order binary form. * return: * 1 if `src' is a valid [RFC1884 2.2] address, else 0. * notice: * (1) does not touch `dst' unless it's returning 1. * (2) :: in a full address is silently ignored. * credit: * inspired by Mark Andrews. * author: * Paul Vixie, 1996. 
*/ #ifdef AF_INET6 static int inet_pton6(src, dst) const char *src; unsigned char *dst; { static const char xdigits_l[] = "0123456789abcdef", xdigits_u[] = "0123456789ABCDEF"; unsigned char tmp[NS_IN6ADDRSZ], *tp, *endp, *colonp; const char *xdigits, *curtok; int ch, saw_xdigit; unsigned int val; memset((tp = tmp), '\0', NS_IN6ADDRSZ); endp = tp + NS_IN6ADDRSZ; colonp = NULL; /* Leading :: requires some special handling. */ if (*src == ':') if (*++src != ':') return (0); curtok = src; saw_xdigit = 0; val = 0; while ((ch = *src++) != '\0') { const char *pch; if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL) pch = strchr((xdigits = xdigits_u), ch); if (pch != NULL) { val <<= 4; val |= (pch - xdigits); if (val > 0xffff) return (0); saw_xdigit = 1; continue; } if (ch == ':') { curtok = src; if (!saw_xdigit) { if (colonp) return (0); colonp = tp; continue; } if (tp + NS_INT16SZ > endp) return (0); *tp++ = (unsigned char) (val >> 8) & 0xff; *tp++ = (unsigned char) val & 0xff; saw_xdigit = 0; val = 0; continue; } if (ch == '.' && ((tp + NS_INADDRSZ) <= endp) && inet_pton4(curtok, tp) > 0) { tp += NS_INADDRSZ; saw_xdigit = 0; break; /* '\0' was seen by inet_pton4(). */ } return (0); } if (saw_xdigit) { if (tp + NS_INT16SZ > endp) return (0); *tp++ = (unsigned char) (val >> 8) & 0xff; *tp++ = (unsigned char) val & 0xff; } if (colonp != NULL) { /* * Since some memmove()'s erroneously fail to handle * overlapping regions, we'll do the shift by hand. */ const int n = tp - colonp; int i; for (i = 1; i <= n; i++) { endp[- i] = colonp[n - i]; colonp[n - i] = 0; } tp = endp; } if (tp != endp) return (0); memcpy(dst, tmp, NS_IN6ADDRSZ); return (1); } #endif ntdb-1.0/lib/replace/poll.c000066400000000000000000000064051224151530700156010ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. 
poll.c - poll wrapper This file is based on code from libssh (LGPLv2.1+ at the time it was downloaded), thus the following copyrights: Copyright (c) 2009-2010 by Andreas Schneider Copyright (c) 2003-2009 by Aris Adamantiadis Copyright (c) 2009 Aleksandar Kanchev Copyright (C) Volker Lendecke 2011 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include "replace.h" #include "system/select.h" #ifdef HAVE_SYS_TIME_H #include #endif #ifdef HAVE_SYS_IOCTL_H #include #endif int rep_poll(struct pollfd *fds, nfds_t nfds, int timeout) { fd_set rfds, wfds, efds; struct timeval tv, *ptv; int max_fd; int rc; nfds_t i; if ((fds == NULL) && (nfds != 0)) { errno = EFAULT; return -1; } FD_ZERO(&rfds); FD_ZERO(&wfds); FD_ZERO(&efds); rc = 0; max_fd = 0; /* compute fd_sets and find largest descriptor */ for (i = 0; i < nfds; i++) { if ((fds[i].fd < 0) || (fds[i].fd >= FD_SETSIZE)) { fds[i].revents = POLLNVAL; continue; } if (fds[i].events & (POLLIN | POLLRDNORM)) { FD_SET(fds[i].fd, &rfds); } if (fds[i].events & (POLLOUT | POLLWRNORM | POLLWRBAND)) { FD_SET(fds[i].fd, &wfds); } if (fds[i].events & (POLLPRI | POLLRDBAND)) { FD_SET(fds[i].fd, &efds); } if (fds[i].fd > max_fd && (fds[i].events & (POLLIN | POLLOUT | POLLPRI | POLLRDNORM | POLLRDBAND | POLLWRNORM | POLLWRBAND))) { max_fd = fds[i].fd; } } if (timeout < 0) { ptv = NULL; } else { ptv = &tv; if (timeout == 0) { tv.tv_sec = 0; tv.tv_usec = 0; } else { tv.tv_sec = timeout / 1000; tv.tv_usec = (timeout % 1000) * 1000; } } rc = select(max_fd + 1, &rfds, &wfds, &efds, ptv); if (rc < 0) { return -1; } for (rc = 0, i = 0; i < nfds; i++) { if ((fds[i].fd < 0) || (fds[i].fd >= FD_SETSIZE)) { continue; } fds[i].revents = 0; if (FD_ISSET(fds[i].fd, &rfds)) { int err = errno; int available = 0; int ret; /* support for POLLHUP */ ret = ioctl(fds[i].fd, FIONREAD, &available); if ((ret == -1) || (available == 0)) { fds[i].revents |= POLLHUP; } else { fds[i].revents |= fds[i].events & (POLLIN | POLLRDNORM); } errno = err; } if (FD_ISSET(fds[i].fd, &wfds)) { fds[i].revents |= fds[i].events & (POLLOUT | POLLWRNORM | POLLWRBAND); } if (FD_ISSET(fds[i].fd, &efds)) { fds[i].revents |= fds[i].events & (POLLPRI | POLLRDBAND); } if (fds[i].revents & ~POLLHUP) { rc++; } } return rc; } 
ntdb-1.0/lib/replace/repdir_getdents.c000066400000000000000000000102131224151530700200050ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Copyright (C) Andrew Tridgell 2005 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ /* a replacement for opendir/readdir/telldir/seekdir/closedir for BSD systems This is needed because the existing directory handling in FreeBSD and OpenBSD (and possibly NetBSD) doesn't correctly handle unlink() on files in a directory where telldir() has been used. On a block boundary it will occasionally miss a file when seekdir() is used to return to a position previously recorded with telldir(). This also fixes a severe performance and memory usage problem with telldir() on BSD systems. Each call to telldir() in BSD adds an entry to a linked list, and those entries are cleaned up on closedir(). This means with a large directory closedir() can take an arbitrary amount of time, causing network timeouts as millions of telldir() entries are freed Note! This replacement code is not portable. It relies on getdents() always leaving the file descriptor at a seek offset that is a multiple of DIR_BUF_SIZE. If the code detects that this doesn't happen then it will abort(). 
It also does not handle directories with offsets larger than can be stored in a long, This code is available under other free software licenses as well. Contact the author. */ #include #include #include #include #include #include #include #define DIR_BUF_BITS 9 #define DIR_BUF_SIZE (1<fd = open(dname, O_RDONLY); if (d->fd == -1) { free(d); return NULL; } if (fstat(d->fd, &sb) < 0) { close(d->fd); free(d); return NULL; } if (!S_ISDIR(sb.st_mode)) { close(d->fd); free(d); errno = ENOTDIR; return NULL; } d->ofs = 0; d->seekpos = 0; d->nbytes = 0; return (DIR *)d; } struct dirent *readdir(DIR *dir) { struct dir_buf *d = (struct dir_buf *)dir; struct dirent *de; if (d->ofs >= d->nbytes) { d->seekpos = lseek(d->fd, 0, SEEK_CUR); d->nbytes = getdents(d->fd, d->buf, DIR_BUF_SIZE); d->ofs = 0; } if (d->ofs >= d->nbytes) { return NULL; } de = (struct dirent *)&d->buf[d->ofs]; d->ofs += de->d_reclen; return de; } long telldir(DIR *dir) { struct dir_buf *d = (struct dir_buf *)dir; if (d->ofs >= d->nbytes) { d->seekpos = lseek(d->fd, 0, SEEK_CUR); d->ofs = 0; d->nbytes = 0; } /* this relies on seekpos always being a multiple of DIR_BUF_SIZE. Is that always true on BSD systems? */ if (d->seekpos & (DIR_BUF_SIZE-1)) { abort(); } return d->seekpos + d->ofs; } void seekdir(DIR *dir, long ofs) { struct dir_buf *d = (struct dir_buf *)dir; d->seekpos = lseek(d->fd, ofs & ~(DIR_BUF_SIZE-1), SEEK_SET); d->nbytes = getdents(d->fd, d->buf, DIR_BUF_SIZE); d->ofs = 0; while (d->ofs < (ofs & (DIR_BUF_SIZE-1))) { if (readdir(dir) == NULL) break; } } void rewinddir(DIR *dir) { seekdir(dir, 0); } int closedir(DIR *dir) { struct dir_buf *d = (struct dir_buf *)dir; int r = close(d->fd); if (r != 0) { return r; } free(d); return 0; } #ifndef dirfd /* darn, this is a macro on some systems. 
*/ int dirfd(DIR *dir) { struct dir_buf *d = (struct dir_buf *)dir; return d->fd; } #endif ntdb-1.0/lib/replace/repdir_getdirentries.c000066400000000000000000000106231224151530700210450ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Copyright (C) Andrew Tridgell 2005 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ /* a replacement for opendir/readdir/telldir/seekdir/closedir for BSD systems using getdirentries This is needed because the existing directory handling in FreeBSD and OpenBSD (and possibly NetBSD) doesn't correctly handle unlink() on files in a directory where telldir() has been used. On a block boundary it will occasionally miss a file when seekdir() is used to return to a position previously recorded with telldir(). This also fixes a severe performance and memory usage problem with telldir() on BSD systems. Each call to telldir() in BSD adds an entry to a linked list, and those entries are cleaned up on closedir(). This means with a large directory closedir() can take an arbitrary amount of time, causing network timeouts as millions of telldir() entries are freed Note! This replacement code is not portable. It relies on getdirentries() always leaving the file descriptor at a seek offset that is a multiple of DIR_BUF_SIZE. 
If the code detects that this doesn't happen then it will abort(). It also does not handle directories with offsets larger than can be stored in a long, This code is available under other free software licenses as well. Contact the author. */ #include "replace.h" #include #include #include #include #include #include #include #define DIR_BUF_BITS 9 #define DIR_BUF_SIZE (1<fd = open(dname, O_RDONLY); if (d->fd == -1) { free(d); return NULL; } if (fstat(d->fd, &sb) < 0) { close(d->fd); free(d); return NULL; } if (!S_ISDIR(sb.st_mode)) { close(d->fd); free(d); errno = ENOTDIR; return NULL; } d->ofs = 0; d->seekpos = 0; d->nbytes = 0; return (DIR *)d; } struct dirent *readdir(DIR *dir) { struct dir_buf *d = (struct dir_buf *)dir; struct dirent *de; if (d->ofs >= d->nbytes) { long pos; d->nbytes = getdirentries(d->fd, d->buf, DIR_BUF_SIZE, &pos); d->seekpos = pos; d->ofs = 0; } if (d->ofs >= d->nbytes) { return NULL; } de = (struct dirent *)&d->buf[d->ofs]; d->ofs += de->d_reclen; return de; } #ifdef TELLDIR_TAKES_CONST_DIR long telldir(const DIR *dir) #else long telldir(DIR *dir) #endif { struct dir_buf *d = (struct dir_buf *)dir; if (d->ofs >= d->nbytes) { d->seekpos = lseek(d->fd, 0, SEEK_CUR); d->ofs = 0; d->nbytes = 0; } /* this relies on seekpos always being a multiple of DIR_BUF_SIZE. Is that always true on BSD systems? 
*/ if (d->seekpos & (DIR_BUF_SIZE-1)) { abort(); } return d->seekpos + d->ofs; } #ifdef SEEKDIR_RETURNS_INT int seekdir(DIR *dir, long ofs) #else void seekdir(DIR *dir, long ofs) #endif { struct dir_buf *d = (struct dir_buf *)dir; long pos; d->seekpos = lseek(d->fd, ofs & ~(DIR_BUF_SIZE-1), SEEK_SET); d->nbytes = getdirentries(d->fd, d->buf, DIR_BUF_SIZE, &pos); d->ofs = 0; while (d->ofs < (ofs & (DIR_BUF_SIZE-1))) { if (readdir(dir) == NULL) break; } #ifdef SEEKDIR_RETURNS_INT return -1; #endif } void rewinddir(DIR *dir) { seekdir(dir, 0); } int closedir(DIR *dir) { struct dir_buf *d = (struct dir_buf *)dir; int r = close(d->fd); if (r != 0) { return r; } free(d); return 0; } #ifndef dirfd /* darn, this is a macro on some systems. */ int dirfd(DIR *dir) { struct dir_buf *d = (struct dir_buf *)dir; return d->fd; } #endif ntdb-1.0/lib/replace/replace-test.h000066400000000000000000000003371224151530700172260ustar00rootroot00000000000000#ifndef __LIB_REPLACE_REPLACE_TEST_H__ #define __LIB_REPLACE_REPLACE_TEST_H__ int libreplace_test_strptime(void); int test_readdir_os2_delete(void); int getifaddrs_test(void); #endif /* __LIB_REPLACE_REPLACE_TEST_H__ */ ntdb-1.0/lib/replace/replace-testsuite.h000066400000000000000000000003631224151530700202770ustar00rootroot00000000000000#ifndef __LIB_REPLACE_REPLACE_TESTSUITE_H__ #define __LIB_REPLACE_REPLACE_TESTSUITE_H__ #include struct torture_context; bool torture_local_replace(struct torture_context *ctx); #endif /* __LIB_REPLACE_REPLACE_TESTSUITE_H__ */ ntdb-1.0/lib/replace/replace.c000066400000000000000000000447311224151530700162520ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. replacement routines for broken systems Copyright (C) Andrew Tridgell 1992-1998 Copyright (C) Jelmer Vernooij 2005-2008 Copyright (C) Matthieu Patou 2010 ** NOTE! The following LGPL license applies to the replace ** library. 
This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "system/filesys.h" #include "system/time.h" #include "system/network.h" #include "system/passwd.h" #include "system/syslog.h" #include "system/locale.h" #include "system/wait.h" #ifdef _WIN32 #define mkdir(d,m) _mkdir(d) #endif void replace_dummy(void); void replace_dummy(void) {} #ifndef HAVE_FTRUNCATE /******************************************************************* ftruncate for operating systems that don't have it ********************************************************************/ int rep_ftruncate(int f, off_t l) { #ifdef HAVE_CHSIZE return chsize(f,l); #elif defined(F_FREESP) struct flock fl; fl.l_whence = 0; fl.l_len = 0; fl.l_start = l; fl.l_type = F_WRLCK; return fcntl(f, F_FREESP, &fl); #else #error "you must have a ftruncate function" #endif } #endif /* HAVE_FTRUNCATE */ #ifndef HAVE_STRLCPY /* like strncpy but does not 0 fill the buffer and always null terminates. bufsize is the size of the destination buffer */ size_t rep_strlcpy(char *d, const char *s, size_t bufsize) { size_t len = strlen(s); size_t ret = len; if (bufsize <= 0) return 0; if (len >= bufsize) len = bufsize-1; memcpy(d, s, len); d[len] = 0; return ret; } #endif #ifndef HAVE_STRLCAT /* like strncat but does not 0 fill the buffer and always null terminates. 
bufsize is the length of the buffer, which should be one more than the maximum resulting string length */ size_t rep_strlcat(char *d, const char *s, size_t bufsize) { size_t len1 = strlen(d); size_t len2 = strlen(s); size_t ret = len1 + len2; if (len1+len2 >= bufsize) { if (bufsize < (len1+1)) { return ret; } len2 = bufsize - (len1+1); } if (len2 > 0) { memcpy(d+len1, s, len2); d[len1+len2] = 0; } return ret; } #endif #ifndef HAVE_MKTIME /******************************************************************* a mktime() replacement for those who don't have it - contributed by C.A. Lademann Corrections by richard.kettlewell@kewill.com ********************************************************************/ #define MINUTE 60 #define HOUR 60*MINUTE #define DAY 24*HOUR #define YEAR 365*DAY time_t rep_mktime(struct tm *t) { struct tm *u; time_t epoch = 0; int n; int mon [] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }, y, m, i; if(t->tm_year < 70) return((time_t)-1); n = t->tm_year + 1900 - 1; epoch = (t->tm_year - 70) * YEAR + ((n / 4 - n / 100 + n / 400) - (1969 / 4 - 1969 / 100 + 1969 / 400)) * DAY; y = t->tm_year + 1900; m = 0; for(i = 0; i < t->tm_mon; i++) { epoch += mon [m] * DAY; if(m == 1 && y % 4 == 0 && (y % 100 != 0 || y % 400 == 0)) epoch += DAY; if(++m > 11) { m = 0; y++; } } epoch += (t->tm_mday - 1) * DAY; epoch += t->tm_hour * HOUR + t->tm_min * MINUTE + t->tm_sec; if((u = localtime(&epoch)) != NULL) { t->tm_sec = u->tm_sec; t->tm_min = u->tm_min; t->tm_hour = u->tm_hour; t->tm_mday = u->tm_mday; t->tm_mon = u->tm_mon; t->tm_year = u->tm_year; t->tm_wday = u->tm_wday; t->tm_yday = u->tm_yday; t->tm_isdst = u->tm_isdst; } return(epoch); } #endif /* !HAVE_MKTIME */ #ifndef HAVE_INITGROUPS /**************************************************************************** some systems don't have an initgroups call ****************************************************************************/ int rep_initgroups(char *name, gid_t id) { #ifndef HAVE_SETGROUPS /* 
yikes! no SETGROUPS or INITGROUPS? how can this work? */ errno = ENOSYS; return -1; #else /* HAVE_SETGROUPS */ #include gid_t *grouplst = NULL; int max_gr = NGROUPS_MAX; int ret; int i,j; struct group *g; char *gr; if((grouplst = malloc(sizeof(gid_t) * max_gr)) == NULL) { errno = ENOMEM; return -1; } grouplst[0] = id; i = 1; while (i < max_gr && ((g = (struct group *)getgrent()) != (struct group *)NULL)) { if (g->gr_gid == id) continue; j = 0; gr = g->gr_mem[0]; while (gr && (*gr != (char)NULL)) { if (strcmp(name,gr) == 0) { grouplst[i] = g->gr_gid; i++; gr = (char *)NULL; break; } gr = g->gr_mem[++j]; } } endgrent(); ret = setgroups(i, grouplst); free(grouplst); return ret; #endif /* HAVE_SETGROUPS */ } #endif /* HAVE_INITGROUPS */ #ifndef HAVE_MEMMOVE /******************************************************************* safely copies memory, ensuring no overlap problems. this is only used if the machine does not have its own memmove(). this is not the fastest algorithm in town, but it will do for our needs. 
********************************************************************/ void *rep_memmove(void *dest,const void *src,int size) { unsigned long d,s; int i; if (dest==src || !size) return(dest); d = (unsigned long)dest; s = (unsigned long)src; if ((d >= (s+size)) || (s >= (d+size))) { /* no overlap */ memcpy(dest,src,size); return(dest); } if (d < s) { /* we can forward copy */ if (s-d >= sizeof(int) && !(s%sizeof(int)) && !(d%sizeof(int)) && !(size%sizeof(int))) { /* do it all as words */ int *idest = (int *)dest; int *isrc = (int *)src; size /= sizeof(int); for (i=0;i= sizeof(int) && !(s%sizeof(int)) && !(d%sizeof(int)) && !(size%sizeof(int))) { /* do it all as words */ int *idest = (int *)dest; int *isrc = (int *)src; size /= sizeof(int); for (i=size-1;i>=0;i--) idest[i] = isrc[i]; } else { /* simplest */ char *cdest = (char *)dest; char *csrc = (char *)src; for (i=size-1;i>=0;i--) cdest[i] = csrc[i]; } } return(dest); } #endif /* HAVE_MEMMOVE */ #ifndef HAVE_STRDUP /**************************************************************************** duplicate a string ****************************************************************************/ char *rep_strdup(const char *s) { size_t len; char *ret; if (!s) return(NULL); len = strlen(s)+1; ret = (char *)malloc(len); if (!ret) return(NULL); memcpy(ret,s,len); return(ret); } #endif /* HAVE_STRDUP */ #ifndef HAVE_SETLINEBUF void rep_setlinebuf(FILE *stream) { setvbuf(stream, (char *)NULL, _IOLBF, 0); } #endif /* HAVE_SETLINEBUF */ #ifndef HAVE_VSYSLOG #ifdef HAVE_SYSLOG void rep_vsyslog (int facility_priority, const char *format, va_list arglist) { char *msg = NULL; vasprintf(&msg, format, arglist); if (!msg) return; syslog(facility_priority, "%s", msg); free(msg); } #endif /* HAVE_SYSLOG */ #endif /* HAVE_VSYSLOG */ #ifndef HAVE_STRNLEN /** Some platforms don't have strnlen **/ size_t rep_strnlen(const char *s, size_t max) { size_t len; for (len = 0; len < max; len++) { if (s[len] == '\0') { break; } } return len; } #endif 
#ifndef HAVE_STRNDUP /** Some platforms don't have strndup. **/ char *rep_strndup(const char *s, size_t n) { char *ret; n = strnlen(s, n); ret = malloc(n+1); if (!ret) return NULL; memcpy(ret, s, n); ret[n] = 0; return ret; } #endif #if !defined(HAVE_WAITPID) && defined(HAVE_WAIT4) int rep_waitpid(pid_t pid,int *status,int options) { return wait4(pid, status, options, NULL); } #endif #ifndef HAVE_SETEUID int rep_seteuid(uid_t euid) { #ifdef HAVE_SETRESUID return setresuid(-1, euid, -1); #else errno = ENOSYS; return -1; #endif } #endif #ifndef HAVE_SETEGID int rep_setegid(gid_t egid) { #ifdef HAVE_SETRESGID return setresgid(-1, egid, -1); #else errno = ENOSYS; return -1; #endif } #endif /******************************************************************* os/2 also doesn't have chroot ********************************************************************/ #ifndef HAVE_CHROOT int rep_chroot(const char *dname) { errno = ENOSYS; return -1; } #endif /***************************************************************** Possibly replace mkstemp if it is broken. *****************************************************************/ #ifndef HAVE_SECURE_MKSTEMP int rep_mkstemp(char *template) { /* have a reasonable go at emulating it. Hope that the system mktemp() isn't completely hopeless */ mktemp(template); if (template[0] == 0) return -1; return open(template, O_CREAT|O_EXCL|O_RDWR, 0600); } #endif #ifndef HAVE_MKDTEMP char *rep_mkdtemp(char *template) { char *dname; if ((dname = mktemp(template))) { if (mkdir(dname, 0700) >= 0) { return dname; } } return NULL; } #endif /***************************************************************** Watch out: this is not thread safe. 
*****************************************************************/ #ifndef HAVE_PREAD ssize_t rep_pread(int __fd, void *__buf, size_t __nbytes, off_t __offset) { if (lseek(__fd, __offset, SEEK_SET) != __offset) { return -1; } return read(__fd, __buf, __nbytes); } #endif /***************************************************************** Watch out: this is not thread safe. *****************************************************************/ #ifndef HAVE_PWRITE ssize_t rep_pwrite(int __fd, const void *__buf, size_t __nbytes, off_t __offset) { if (lseek(__fd, __offset, SEEK_SET) != __offset) { return -1; } return write(__fd, __buf, __nbytes); } #endif #ifndef HAVE_STRCASESTR char *rep_strcasestr(const char *haystack, const char *needle) { const char *s; size_t nlen = strlen(needle); for (s=haystack;*s;s++) { if (toupper(*needle) == toupper(*s) && strncasecmp(s, needle, nlen) == 0) { return (char *)((uintptr_t)s); } } return NULL; } #endif #ifndef HAVE_STRTOK_R /* based on GLIBC version, copyright Free Software Foundation */ char *rep_strtok_r(char *s, const char *delim, char **save_ptr) { char *token; if (s == NULL) s = *save_ptr; s += strspn(s, delim); if (*s == '\0') { *save_ptr = s; return NULL; } token = s; s = strpbrk(token, delim); if (s == NULL) { *save_ptr = token + strlen(token); } else { *s = '\0'; *save_ptr = s + 1; } return token; } #endif #ifndef HAVE_STRTOLL long long int rep_strtoll(const char *str, char **endptr, int base) { #ifdef HAVE_STRTOQ return strtoq(str, endptr, base); #elif defined(HAVE___STRTOLL) return __strtoll(str, endptr, base); #elif SIZEOF_LONG == SIZEOF_LONG_LONG return (long long int) strtol(str, endptr, base); #else # error "You need a strtoll function" #endif } #else #ifdef HAVE_BSD_STRTOLL #ifdef HAVE_STRTOQ long long int rep_strtoll(const char *str, char **endptr, int base) { long long int nb = strtoq(str, endptr, base); /* In linux EINVAL is only returned if base is not ok */ if (errno == EINVAL) { if (base == 0 || (base >1 && base 
<37)) { /* Base was ok so it's because we were not * able to make the convertion. * Let's reset errno. */ errno = 0; } } return nb; } #else #error "You need the strtoq function" #endif /* HAVE_STRTOQ */ #endif /* HAVE_BSD_STRTOLL */ #endif /* HAVE_STRTOLL */ #ifndef HAVE_STRTOULL unsigned long long int rep_strtoull(const char *str, char **endptr, int base) { #ifdef HAVE_STRTOUQ return strtouq(str, endptr, base); #elif defined(HAVE___STRTOULL) return __strtoull(str, endptr, base); #elif SIZEOF_LONG == SIZEOF_LONG_LONG return (unsigned long long int) strtoul(str, endptr, base); #else # error "You need a strtoull function" #endif } #else #ifdef HAVE_BSD_STRTOLL #ifdef HAVE_STRTOUQ unsigned long long int rep_strtoull(const char *str, char **endptr, int base) { unsigned long long int nb = strtouq(str, endptr, base); /* In linux EINVAL is only returned if base is not ok */ if (errno == EINVAL) { if (base == 0 || (base >1 && base <37)) { /* Base was ok so it's because we were not * able to make the convertion. * Let's reset errno. 
*/ errno = 0; } } return nb; } #else #error "You need the strtouq function" #endif /* HAVE_STRTOUQ */ #endif /* HAVE_BSD_STRTOLL */ #endif /* HAVE_STRTOULL */ #ifndef HAVE_SETENV int rep_setenv(const char *name, const char *value, int overwrite) { char *p; size_t l1, l2; int ret; if (!overwrite && getenv(name)) { return 0; } l1 = strlen(name); l2 = strlen(value); p = malloc(l1+l2+2); if (p == NULL) { return -1; } memcpy(p, name, l1); p[l1] = '='; memcpy(p+l1+1, value, l2); p[l1+l2+1] = 0; ret = putenv(p); if (ret != 0) { free(p); } return ret; } #endif #ifndef HAVE_UNSETENV int rep_unsetenv(const char *name) { extern char **environ; size_t len = strlen(name); size_t i, count; if (environ == NULL || getenv(name) == NULL) { return 0; } for (i=0;environ[i];i++) /* noop */ ; count=i; for (i=0;i= needlelen) { char *p = (char *)memchr(haystack, *(const char *)needle, haystacklen-(needlelen-1)); if (!p) return NULL; if (memcmp(p, needle, needlelen) == 0) { return p; } haystack = p+1; haystacklen -= (p - (const char *)haystack) + 1; } return NULL; } #endif #if !defined(HAVE_VDPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_vdprintf(int fd, const char *format, va_list ap) { char *s = NULL; int ret; vasprintf(&s, format, ap); if (s == NULL) { errno = ENOMEM; return -1; } ret = write(fd, s, strlen(s)); free(s); return ret; } #endif #if !defined(HAVE_DPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_dprintf(int fd, const char *format, ...) 
{ int ret; va_list ap; va_start(ap, format); ret = vdprintf(fd, format, ap); va_end(ap); return ret; } #endif #ifndef HAVE_GET_CURRENT_DIR_NAME char *rep_get_current_dir_name(void) { char buf[PATH_MAX+1]; char *p; p = getcwd(buf, sizeof(buf)); if (p == NULL) { return NULL; } return strdup(p); } #endif #ifndef HAVE_STRERROR_R int rep_strerror_r(int errnum, char *buf, size_t buflen) { char *s = strerror(errnum); if (strlen(s)+1 > buflen) { errno = ERANGE; return -1; } strncpy(buf, s, buflen); return 0; } #endif #ifndef HAVE_CLOCK_GETTIME int rep_clock_gettime(clockid_t clk_id, struct timespec *tp) { struct timeval tval; switch (clk_id) { case 0: /* CLOCK_REALTIME :*/ #ifdef HAVE_GETTIMEOFDAY_TZ gettimeofday(&tval,NULL); #else gettimeofday(&tval); #endif tp->tv_sec = tval.tv_sec; tp->tv_nsec = tval.tv_usec * 1000; break; default: errno = EINVAL; return -1; } return 0; } #endif #ifndef HAVE_MEMALIGN void *rep_memalign( size_t align, size_t size ) { #if defined(HAVE_POSIX_MEMALIGN) void *p = NULL; int ret = posix_memalign( &p, align, size ); if ( ret == 0 ) return p; return NULL; #else /* On *BSD systems memaligns doesn't exist, but memory will * be aligned on allocations of > pagesize. 
*/ #if defined(SYSCONF_SC_PAGESIZE) size_t pagesize = (size_t)sysconf(_SC_PAGESIZE); #elif defined(HAVE_GETPAGESIZE) size_t pagesize = (size_t)getpagesize(); #else size_t pagesize = (size_t)-1; #endif if (pagesize == (size_t)-1) { errno = ENOSYS; return NULL; } if (size < pagesize) { size = pagesize; } return malloc(size); #endif } #endif #ifndef HAVE_GETPEEREID int rep_getpeereid(int s, uid_t *uid, gid_t *gid) { #if defined(HAVE_PEERCRED) struct ucred cred; socklen_t cred_len = sizeof(struct ucred); int ret; #undef getsockopt ret = getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void *)&cred, &cred_len); if (ret != 0) { return -1; } if (cred_len != sizeof(struct ucred)) { errno = EINVAL; return -1; } *uid = cred.uid; *gid = cred.gid; return 0; #else errno = ENOSYS; return -1; #endif } #endif #ifndef HAVE_USLEEP int rep_usleep(useconds_t sec) { struct timeval tval; /* * Fake it with select... */ tval.tv_sec = 0; tval.tv_usec = usecs/1000; select(0,NULL,NULL,NULL,&tval); return 0; } #endif /* HAVE_USLEEP */ #ifndef HAVE_SETPROCTITLE void rep_setproctitle(const char *fmt, ...) { } #endif ntdb-1.0/lib/replace/replace.h000066400000000000000000000461531224151530700162570ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. macros to go along with the lib/replace/ portability layer code Copyright (C) Andrew Tridgell 2005 Copyright (C) Jelmer Vernooij 2006-2008 Copyright (C) Jeremy Allison 2007. ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifndef _LIBREPLACE_REPLACE_H #define _LIBREPLACE_REPLACE_H #ifndef NO_CONFIG_H #include "config.h" #endif #ifdef HAVE_STANDARDS_H #include #endif #include #include #include #include #if defined(_MSC_VER) || defined(__MINGW32__) #include "win32_replace.h" #endif #ifdef HAVE_INTTYPES_H #define __STDC_FORMAT_MACROS #include #elif HAVE_STDINT_H #include /* force off HAVE_INTTYPES_H so that roken doesn't try to include both, which causes a warning storm on irix */ #undef HAVE_INTTYPES_H #endif #ifdef HAVE_MALLOC_H #include #endif #ifndef __PRI64_PREFIX # if __WORDSIZE == 64 && ! defined __APPLE__ # define __PRI64_PREFIX "l" # else # define __PRI64_PREFIX "ll" # endif #endif /* Decimal notation. */ #ifndef PRId8 # define PRId8 "d" #endif #ifndef PRId16 # define PRId16 "d" #endif #ifndef PRId32 # define PRId32 "d" #endif #ifndef PRId64 # define PRId64 __PRI64_PREFIX "d" #endif #ifndef PRIi8 # define PRIi8 "i" #endif #ifndef PRIi16 # define PRIi16 "i" #endif #ifndef PRIi32 # define PRIi32 "i" #endif #ifndef PRIi64 # define PRIi64 __PRI64_PREFIX "i" #endif #ifndef PRIu8 # define PRIu8 "u" #endif #ifndef PRIu16 # define PRIu16 "u" #endif #ifndef PRIu32 # define PRIu32 "u" #endif #ifndef PRIu64 # define PRIu64 __PRI64_PREFIX "u" #endif #ifndef SCNd8 # define SCNd8 "hhd" #endif #ifndef SCNd16 # define SCNd16 "hd" #endif #ifndef SCNd32 # define SCNd32 "d" #endif #ifndef SCNd64 # define SCNd64 __PRI64_PREFIX "d" #endif #ifndef SCNi8 # define SCNi8 "hhi" #endif #ifndef SCNi16 # define SCNi16 "hi" #endif #ifndef SCNi32 # define SCNi32 "i" #endif #ifndef SCNi64 # define SCNi64 __PRI64_PREFIX "i" #endif #ifndef SCNu8 # define SCNu8 "hhu" #endif #ifndef SCNu16 # define SCNu16 "hu" #endif #ifndef SCNu32 # define SCNu32 "u" #endif #ifndef SCNu64 # define SCNu64 __PRI64_PREFIX "u" #endif #ifdef HAVE_BSD_STRING_H 
#include #endif #ifdef HAVE_BSD_UNISTD_H #include #endif #ifdef HAVE_STRING_H #include #endif #ifdef HAVE_STRINGS_H #include #endif #ifdef HAVE_SYS_TYPES_H #include #endif #ifdef HAVE_SETPROCTITLE_H #include #endif #if STDC_HEADERS #include #include #endif #ifdef HAVE_LINUX_TYPES_H /* * This is needed as some broken header files require this to be included early */ #include #endif #ifndef HAVE_STRERROR extern char *sys_errlist[]; #define strerror(i) sys_errlist[i] #endif #ifndef HAVE_ERRNO_DECL extern int errno; #endif #ifndef HAVE_STRDUP #define strdup rep_strdup char *rep_strdup(const char *s); #endif #ifndef HAVE_MEMMOVE #define memmove rep_memmove void *rep_memmove(void *dest,const void *src,int size); #endif #ifndef HAVE_MEMMEM #define memmem rep_memmem void *rep_memmem(const void *haystack, size_t haystacklen, const void *needle, size_t needlelen); #endif #ifndef HAVE_MEMALIGN #define memalign rep_memalign void *rep_memalign(size_t boundary, size_t size); #endif #ifndef HAVE_MKTIME #define mktime rep_mktime /* prototype is in "system/time.h" */ #endif #ifndef HAVE_TIMEGM #define timegm rep_timegm /* prototype is in "system/time.h" */ #endif #ifndef HAVE_UTIME #define utime rep_utime /* prototype is in "system/time.h" */ #endif #ifndef HAVE_UTIMES #define utimes rep_utimes /* prototype is in "system/time.h" */ #endif #ifndef HAVE_STRLCPY #define strlcpy rep_strlcpy size_t rep_strlcpy(char *d, const char *s, size_t bufsize); #endif #ifndef HAVE_STRLCAT #define strlcat rep_strlcat size_t rep_strlcat(char *d, const char *s, size_t bufsize); #endif #if (defined(BROKEN_STRNDUP) || !defined(HAVE_STRNDUP)) #undef HAVE_STRNDUP #define strndup rep_strndup char *rep_strndup(const char *s, size_t n); #endif #if (defined(BROKEN_STRNLEN) || !defined(HAVE_STRNLEN)) #undef HAVE_STRNLEN #define strnlen rep_strnlen size_t rep_strnlen(const char *s, size_t n); #endif #if !HAVE_DECL_ENVIRON #ifdef __APPLE__ #include #define environ (*_NSGetEnviron()) #else extern char **environ; 
#endif #endif #ifndef HAVE_SETENV #define setenv rep_setenv int rep_setenv(const char *name, const char *value, int overwrite); #else #ifndef HAVE_SETENV_DECL int setenv(const char *name, const char *value, int overwrite); #endif #endif #ifndef HAVE_UNSETENV #define unsetenv rep_unsetenv int rep_unsetenv(const char *name); #endif #ifndef HAVE_SETEUID #define seteuid rep_seteuid int rep_seteuid(uid_t); #endif #ifndef HAVE_SETEGID #define setegid rep_setegid int rep_setegid(gid_t); #endif #if (defined(USE_SETRESUID) && !defined(HAVE_SETRESUID_DECL)) /* stupid glibc */ int setresuid(uid_t ruid, uid_t euid, uid_t suid); #endif #if (defined(USE_SETRESUID) && !defined(HAVE_SETRESGID_DECL)) int setresgid(gid_t rgid, gid_t egid, gid_t sgid); #endif #ifndef HAVE_CHOWN #define chown rep_chown int rep_chown(const char *path, uid_t uid, gid_t gid); #endif #ifndef HAVE_CHROOT #define chroot rep_chroot int rep_chroot(const char *dirname); #endif #ifndef HAVE_LINK #define link rep_link int rep_link(const char *oldpath, const char *newpath); #endif #ifndef HAVE_READLINK #define readlink rep_readlink ssize_t rep_readlink(const char *path, char *buf, size_t bufsize); #endif #ifndef HAVE_SYMLINK #define symlink rep_symlink int rep_symlink(const char *oldpath, const char *newpath); #endif #ifndef HAVE_REALPATH #define realpath rep_realpath char *rep_realpath(const char *path, char *resolved_path); #endif #ifndef HAVE_LCHOWN #define lchown rep_lchown int rep_lchown(const char *fname,uid_t uid,gid_t gid); #endif #ifdef HAVE_UNIX_H #include #endif #ifndef HAVE_SETLINEBUF #define setlinebuf rep_setlinebuf void rep_setlinebuf(FILE *); #endif #ifndef HAVE_STRCASESTR #define strcasestr rep_strcasestr char *rep_strcasestr(const char *haystack, const char *needle); #endif #ifndef HAVE_STRTOK_R #define strtok_r rep_strtok_r char *rep_strtok_r(char *s, const char *delim, char **save_ptr); #endif #ifndef HAVE_STRTOLL #define strtoll rep_strtoll long long int rep_strtoll(const char *str, char 
**endptr, int base); #else #ifdef HAVE_BSD_STRTOLL #define strtoll rep_strtoll long long int rep_strtoll(const char *str, char **endptr, int base); #endif #endif #ifndef HAVE_STRTOULL #define strtoull rep_strtoull unsigned long long int rep_strtoull(const char *str, char **endptr, int base); #else #ifdef HAVE_BSD_STRTOLL /* yes, it's not HAVE_BSD_STRTOULL */ #define strtoull rep_strtoull unsigned long long int rep_strtoull(const char *str, char **endptr, int base); #endif #endif #ifndef HAVE_FTRUNCATE #define ftruncate rep_ftruncate int rep_ftruncate(int,off_t); #endif #ifndef HAVE_INITGROUPS #define initgroups rep_initgroups int rep_initgroups(char *name, gid_t id); #endif #if !defined(HAVE_BZERO) && defined(HAVE_MEMSET) #define bzero(a,b) memset((a),'\0',(b)) #endif #ifndef HAVE_DLERROR #define dlerror rep_dlerror char *rep_dlerror(void); #endif #ifndef HAVE_DLOPEN #define dlopen rep_dlopen #ifdef DLOPEN_TAKES_UNSIGNED_FLAGS void *rep_dlopen(const char *name, unsigned int flags); #else void *rep_dlopen(const char *name, int flags); #endif #endif #ifndef HAVE_DLSYM #define dlsym rep_dlsym void *rep_dlsym(void *handle, const char *symbol); #endif #ifndef HAVE_DLCLOSE #define dlclose rep_dlclose int rep_dlclose(void *handle); #endif #ifndef HAVE_SOCKETPAIR #define socketpair rep_socketpair /* prototype is in system/network.h */ #endif #ifndef PRINTF_ATTRIBUTE #if (__GNUC__ >= 3) && (__GNUC_MINOR__ >= 1 ) /** Use gcc attribute to check printf fns. a1 is the 1-based index of * the parameter containing the format, and a2 the index of the first * argument. 
Note that some gcc 2.x versions don't handle this * properly **/ #define PRINTF_ATTRIBUTE(a1, a2) __attribute__ ((format (__printf__, a1, a2))) #else #define PRINTF_ATTRIBUTE(a1, a2) #endif #endif #ifndef _DEPRECATED_ #if (__GNUC__ >= 3) && (__GNUC_MINOR__ >= 1 ) #define _DEPRECATED_ __attribute__ ((deprecated)) #else #define _DEPRECATED_ #endif #endif #if !defined(HAVE_VDPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define vdprintf rep_vdprintf int rep_vdprintf(int fd, const char *format, va_list ap) PRINTF_ATTRIBUTE(2,0); #endif #if !defined(HAVE_DPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define dprintf rep_dprintf int rep_dprintf(int fd, const char *format, ...) PRINTF_ATTRIBUTE(2,3); #endif #if !defined(HAVE_VASPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define vasprintf rep_vasprintf int rep_vasprintf(char **ptr, const char *format, va_list ap) PRINTF_ATTRIBUTE(2,0); #endif #if !defined(HAVE_SNPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define snprintf rep_snprintf int rep_snprintf(char *,size_t ,const char *, ...) PRINTF_ATTRIBUTE(3,4); #endif #if !defined(HAVE_VSNPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define vsnprintf rep_vsnprintf int rep_vsnprintf(char *,size_t ,const char *, va_list ap) PRINTF_ATTRIBUTE(3,0); #endif #if !defined(HAVE_ASPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define asprintf rep_asprintf int rep_asprintf(char **,const char *, ...) PRINTF_ATTRIBUTE(2,3); #endif #if !defined(HAVE_C99_VSNPRINTF) #ifdef REPLACE_BROKEN_PRINTF /* * We do not redefine printf by default * as it breaks the build if system headers * use __attribute__((format(printf, 3, 0))) * instead of __attribute__((format(__printf__, 3, 0))) */ #define printf rep_printf #endif int rep_printf(const char *, ...) PRINTF_ATTRIBUTE(1,2); #endif #if !defined(HAVE_C99_VSNPRINTF) #define fprintf rep_fprintf int rep_fprintf(FILE *stream, const char *, ...) 
PRINTF_ATTRIBUTE(2,3); #endif #ifndef HAVE_VSYSLOG #ifdef HAVE_SYSLOG #define vsyslog rep_vsyslog void rep_vsyslog (int facility_priority, const char *format, va_list arglist) PRINTF_ATTRIBUTE(2,0); #endif #endif /* we used to use these fns, but now we have good replacements for snprintf and vsnprintf */ #define slprintf snprintf #ifndef HAVE_VA_COPY #undef va_copy #ifdef HAVE___VA_COPY #define va_copy(dest, src) __va_copy(dest, src) #else #define va_copy(dest, src) (dest) = (src) #endif #endif #ifndef HAVE_VOLATILE #define volatile #endif #ifndef HAVE_COMPARISON_FN_T typedef int (*comparison_fn_t)(const void *, const void *); #endif #ifndef HAVE_WORKING_STRPTIME #define strptime rep_strptime struct tm; char *rep_strptime(const char *buf, const char *format, struct tm *tm); #endif #ifndef HAVE_DUP2 #define dup2 rep_dup2 int rep_dup2(int oldfd, int newfd); #endif /* Load header file for dynamic linking stuff */ #ifdef HAVE_DLFCN_H #include #endif #ifndef RTLD_LAZY #define RTLD_LAZY 0 #endif #ifndef RTLD_NOW #define RTLD_NOW 0 #endif #ifndef RTLD_GLOBAL #define RTLD_GLOBAL 0 #endif #ifndef HAVE_SECURE_MKSTEMP #define mkstemp(path) rep_mkstemp(path) int rep_mkstemp(char *temp); #endif #ifndef HAVE_MKDTEMP #define mkdtemp rep_mkdtemp char *rep_mkdtemp(char *template); #endif #ifndef HAVE_PREAD #define pread rep_pread ssize_t rep_pread(int __fd, void *__buf, size_t __nbytes, off_t __offset); #define LIBREPLACE_PREAD_REPLACED 1 #else #define LIBREPLACE_PREAD_NOT_REPLACED 1 #endif #ifndef HAVE_PWRITE #define pwrite rep_pwrite ssize_t rep_pwrite(int __fd, const void *__buf, size_t __nbytes, off_t __offset); #define LIBREPLACE_PWRITE_REPLACED 1 #else #define LIBREPLACE_PWRITE_NOT_REPLACED 1 #endif #if !defined(HAVE_INET_NTOA) || defined(REPLACE_INET_NTOA) #define inet_ntoa rep_inet_ntoa /* prototype is in "system/network.h" */ #endif #ifndef HAVE_INET_PTON #define inet_pton rep_inet_pton /* prototype is in "system/network.h" */ #endif #ifndef HAVE_INET_NTOP #define 
inet_ntop rep_inet_ntop /* prototype is in "system/network.h" */ #endif #ifndef HAVE_INET_ATON #define inet_aton rep_inet_aton /* prototype is in "system/network.h" */ #endif #ifndef HAVE_CONNECT #define connect rep_connect /* prototype is in "system/network.h" */ #endif #ifndef HAVE_GETHOSTBYNAME #define gethostbyname rep_gethostbyname /* prototype is in "system/network.h" */ #endif #ifndef HAVE_GETIFADDRS #define getifaddrs rep_getifaddrs /* prototype is in "system/network.h" */ #endif #ifndef HAVE_FREEIFADDRS #define freeifaddrs rep_freeifaddrs /* prototype is in "system/network.h" */ #endif #ifndef HAVE_GET_CURRENT_DIR_NAME #define get_current_dir_name rep_get_current_dir_name char *rep_get_current_dir_name(void); #endif #ifndef HAVE_STRERROR_R #define strerror_r rep_strerror_r int rep_strerror_r(int errnum, char *buf, size_t buflen); #endif #if !defined(HAVE_CLOCK_GETTIME) #define clock_gettime rep_clock_gettime #endif #ifdef HAVE_LIMITS_H #include #endif #ifdef HAVE_SYS_PARAM_H #include #endif /* The extra casts work around common compiler bugs. */ #define _TYPE_SIGNED(t) (! ((t) 0 < (t) -1)) /* The outer cast is needed to work around a bug in Cray C 5.0.3.0. It is necessary at least when t == time_t. */ #define _TYPE_MINIMUM(t) ((t) (_TYPE_SIGNED (t) \ ? 
~ (t) 0 << (sizeof (t) * CHAR_BIT - 1) : (t) 0)) #define _TYPE_MAXIMUM(t) ((t) (~ (t) 0 - _TYPE_MINIMUM (t))) #ifndef UINT16_MAX #define UINT16_MAX 65535 #endif #ifndef UINT32_MAX #define UINT32_MAX (4294967295U) #endif #ifndef UINT64_MAX #define UINT64_MAX ((uint64_t)-1) #endif #ifndef INT64_MAX #define INT64_MAX 9223372036854775807LL #endif #ifndef CHAR_BIT #define CHAR_BIT 8 #endif #ifndef INT32_MAX #define INT32_MAX _TYPE_MAXIMUM(int32_t) #endif #ifdef HAVE_STDBOOL_H #include #endif #if !defined(HAVE_BOOL) #ifdef HAVE__Bool #define bool _Bool #else typedef int bool; #endif #endif #if !defined(HAVE_INTPTR_T) typedef long long intptr_t ; #endif #if !defined(HAVE_UINTPTR_T) typedef unsigned long long uintptr_t ; #endif #if !defined(HAVE_PTRDIFF_T) typedef unsigned long long ptrdiff_t ; #endif /* * to prevent from doing a redefine of 'bool' * * IRIX, HPUX, MacOS 10 and Solaris need BOOL_DEFINED * Tru64 needs _BOOL_EXISTS * AIX needs _BOOL,_TRUE,_FALSE */ #ifndef BOOL_DEFINED #define BOOL_DEFINED #endif #ifndef _BOOL_EXISTS #define _BOOL_EXISTS #endif #ifndef _BOOL #define _BOOL #endif #ifndef __bool_true_false_are_defined #define __bool_true_false_are_defined #endif #ifndef true #define true (1) #endif #ifndef false #define false (0) #endif #ifndef _TRUE #define _TRUE true #endif #ifndef _FALSE #define _FALSE false #endif #ifndef HAVE_FUNCTION_MACRO #ifdef HAVE_func_MACRO #define __FUNCTION__ __func__ #else #define __FUNCTION__ ("") #endif #endif #ifndef MIN #define MIN(a,b) ((a)<(b)?(a):(b)) #endif #ifndef MAX #define MAX(a,b) ((a)>(b)?(a):(b)) #endif #if !defined(HAVE_VOLATILE) #define volatile #endif /** this is a warning hack. The idea is to use this everywhere that we get the "discarding const" warning from gcc. That doesn't actually fix the problem of course, but it means that when we do get to cleaning them up we can do it by searching the code for discard_const. 
It also means that other error types aren't as swamped by the noise of hundreds of const warnings, so we are more likely to notice when we get new errors. Please only add more uses of this macro when you find it _really_ hard to fix const warnings. Our aim is to eventually use this function in only a very few places. Also, please call this via the discard_const_p() macro interface, as that makes the return type safe. */ #define discard_const(ptr) ((void *)((uintptr_t)(ptr))) /** Type-safe version of discard_const */ #define discard_const_p(type, ptr) ((type *)discard_const(ptr)) #ifndef __STRING #define __STRING(x) #x #endif #ifndef __STRINGSTRING #define __STRINGSTRING(x) __STRING(x) #endif #ifndef __LINESTR__ #define __LINESTR__ __STRINGSTRING(__LINE__) #endif #ifndef __location__ #define __location__ __FILE__ ":" __LINESTR__ #endif /** * zero a structure */ #define ZERO_STRUCT(x) memset((char *)&(x), 0, sizeof(x)) /** * zero a structure given a pointer to the structure */ #define ZERO_STRUCTP(x) do { if ((x) != NULL) memset((char *)(x), 0, sizeof(*(x))); } while(0) /** * zero a structure given a pointer to the structure - no zero check */ #define ZERO_STRUCTPN(x) memset((char *)(x), 0, sizeof(*(x))) /* zero an array - note that sizeof(array) must work - ie. it must not be a pointer */ #define ZERO_ARRAY(x) memset((char *)(x), 0, sizeof(x)) /** * work out how many elements there are in a static array */ #define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) /** * pointer difference macro */ #define PTR_DIFF(p1,p2) ((ptrdiff_t)(((const char *)(p1)) - (const char *)(p2))) #if MMAP_BLACKLIST #undef HAVE_MMAP #endif #ifdef __COMPAR_FN_T #define QSORT_CAST (__compar_fn_t) #endif #ifndef QSORT_CAST #define QSORT_CAST (int (*)(const void *, const void *)) #endif #ifndef PATH_MAX #define PATH_MAX 1024 #endif #ifndef MAX_DNS_NAME_LENGTH #define MAX_DNS_NAME_LENGTH 256 /* Actually 255 but +1 for terminating null. 
*/ #endif #ifndef HAVE_CRYPT char *ufc_crypt(const char *key, const char *salt); #define crypt ufc_crypt #else #ifdef HAVE_CRYPT_H #include #endif #endif /* these macros gain us a few percent of speed on gcc */ #if (__GNUC__ >= 3) /* the strange !! is to ensure that __builtin_expect() takes either 0 or 1 as its first argument */ #ifndef likely #define likely(x) __builtin_expect(!!(x), 1) #endif #ifndef unlikely #define unlikely(x) __builtin_expect(!!(x), 0) #endif #else #ifndef likely #define likely(x) (x) #endif #ifndef unlikely #define unlikely(x) (x) #endif #endif #ifndef HAVE_FDATASYNC #define fdatasync(fd) fsync(fd) #elif !defined(HAVE_DECL_FDATASYNC) int fdatasync(int ); #endif /* these are used to mark symbols as local to a shared lib, or * publicly available via the shared lib API */ #ifndef _PUBLIC_ #ifdef HAVE_VISIBILITY_ATTR #define _PUBLIC_ __attribute__((visibility("default"))) #else #define _PUBLIC_ #endif #endif #ifndef _PRIVATE_ #ifdef HAVE_VISIBILITY_ATTR # define _PRIVATE_ __attribute__((visibility("hidden"))) #else # define _PRIVATE_ #endif #endif #ifndef HAVE_POLL #define poll rep_poll /* prototype is in "system/network.h" */ #endif #ifndef HAVE_GETPEEREID #define getpeereid rep_getpeereid int rep_getpeereid(int s, uid_t *uid, gid_t *gid); #endif #ifndef HAVE_USLEEP #define usleep rep_usleep typedef long useconds_t; int usleep(useconds_t); #endif #ifndef HAVE_SETPROCTITLE #define setproctitle rep_setproctitle void rep_setproctitle(const char *fmt, ...) PRINTF_ATTRIBUTE(1, 2); #endif #endif /* _LIBREPLACE_REPLACE_H */ ntdb-1.0/lib/replace/snprintf.c000066400000000000000000001066631224151530700165050ustar00rootroot00000000000000/* * NOTE: If you change this file, please merge it into rsync, samba, etc. 
*/ /* * Copyright Patrick Powell 1995 * This code is based on code written by Patrick Powell (papowell@astart.com) * It may be used for any purpose as long as this notice remains intact * on all source code distributions */ /************************************************************** * Original: * Patrick Powell Tue Apr 11 09:48:21 PDT 1995 * A bombproof version of doprnt (dopr) included. * Sigh. This sort of thing is always nasty do deal with. Note that * the version here does not include floating point... * * snprintf() is used instead of sprintf() as it does limit checks * for string length. This covers a nasty loophole. * * The other functions are there to prevent NULL pointers from * causing nast effects. * * More Recently: * Brandon Long 9/15/96 for mutt 0.43 * This was ugly. It is still ugly. I opted out of floating point * numbers, but the formatter understands just about everything * from the normal C string format, at least as far as I can tell from * the Solaris 2.5 printf(3S) man page. * * Brandon Long 10/22/97 for mutt 0.87.1 * Ok, added some minimal floating point support, which means this * probably requires libm on most operating systems. Don't yet * support the exponent (e,E) and sigfig (g,G). Also, fmtint() * was pretty badly broken, it just wasn't being exercised in ways * which showed it, so that's been fixed. Also, formated the code * to mutt conventions, and removed dead code left over from the * original. Also, there is now a builtin-test, just compile with: * gcc -DTEST_SNPRINTF -o snprintf snprintf.c -lm * and run snprintf for results. * * Thomas Roessler 01/27/98 for mutt 0.89i * The PGP code was using unsigned hexadecimal formats. * Unfortunately, unsigned formats simply didn't work. * * Michael Elkins 03/05/98 for mutt 0.90.8 * The original code assumed that both snprintf() and vsnprintf() were * missing. Some systems only have snprintf() but not vsnprintf(), so * the code is now broken down under HAVE_SNPRINTF and HAVE_VSNPRINTF. 
* * Andrew Tridgell (tridge@samba.org) Oct 1998 * fixed handling of %.0f * added test for HAVE_LONG_DOUBLE * * tridge@samba.org, idra@samba.org, April 2001 * got rid of fcvt code (twas buggy and made testing harder) * added C99 semantics * * date: 2002/12/19 19:56:31; author: herb; state: Exp; lines: +2 -0 * actually print args for %g and %e * * date: 2002/06/03 13:37:52; author: jmcd; state: Exp; lines: +8 -0 * Since includes.h isn't included here, VA_COPY has to be defined here. I don't * see any include file that is guaranteed to be here, so I'm defining it * locally. Fixes AIX and Solaris builds. * * date: 2002/06/03 03:07:24; author: tridge; state: Exp; lines: +5 -13 * put the ifdef for HAVE_VA_COPY in one place rather than in lots of * functions * * date: 2002/05/17 14:51:22; author: jmcd; state: Exp; lines: +21 -4 * Fix usage of va_list passed as an arg. Use __va_copy before using it * when it exists. * * date: 2002/04/16 22:38:04; author: idra; state: Exp; lines: +20 -14 * Fix incorrect zpadlen handling in fmtfp. * Thanks to Ollie Oldham for spotting it. * few mods to make it easier to compile the tests. * addedd the "Ollie" test to the floating point ones. * * Martin Pool (mbp@samba.org) April 2003 * Remove NO_CONFIG_H so that the test case can be built within a source * tree with less trouble. * Remove unnecessary SAFE_FREE() definition. * * Martin Pool (mbp@samba.org) May 2003 * Put in a prototype for dummy_snprintf() to quiet compiler warnings. * * Move #endif to make sure VA_COPY, LDOUBLE, etc are defined even * if the C library has some snprintf functions already. * * Darren Tucker (dtucker@zip.com.au) 2005 * Fix bug allowing read overruns of the source string with "%.*s" * Usually harmless unless the read runs outside the process' allocation * (eg if your malloc does guard pages) in which case it will segfault. * From OpenSSH. Also added test for same. 
* * Simo Sorce (idra@samba.org) Jan 2006 * * Add support for position independent parameters * fix fmtstr now it conforms to sprintf wrt min.max * **************************************************************/ #include "replace.h" #include "system/locale.h" #ifdef TEST_SNPRINTF /* need math library headers for testing */ /* In test mode, we pretend that this system doesn't have any snprintf * functions, regardless of what config.h says. */ # undef HAVE_SNPRINTF # undef HAVE_VSNPRINTF # undef HAVE_C99_VSNPRINTF # undef HAVE_ASPRINTF # undef HAVE_VASPRINTF # include #endif /* TEST_SNPRINTF */ #if defined(HAVE_SNPRINTF) && defined(HAVE_VSNPRINTF) && defined(HAVE_C99_VSNPRINTF) /* only include stdio.h if we are not re-defining snprintf or vsnprintf */ #include /* make the compiler happy with an empty file */ void dummy_snprintf(void); void dummy_snprintf(void) {} #endif /* HAVE_SNPRINTF, etc */ /* yes this really must be a ||. Don't muck with this (tridge) */ #if !defined(HAVE_VSNPRINTF) || !defined(HAVE_C99_VSNPRINTF) #ifdef HAVE_LONG_DOUBLE #define LDOUBLE long double #else #define LDOUBLE double #endif #ifdef HAVE_LONG_LONG #define LLONG long long #else #define LLONG long #endif #ifndef VA_COPY #ifdef HAVE_VA_COPY #define VA_COPY(dest, src) va_copy(dest, src) #else #ifdef HAVE___VA_COPY #define VA_COPY(dest, src) __va_copy(dest, src) #else #define VA_COPY(dest, src) (dest) = (src) #endif #endif /* * dopr(): poor man's version of doprintf */ /* format read states */ #define DP_S_DEFAULT 0 #define DP_S_FLAGS 1 #define DP_S_MIN 2 #define DP_S_DOT 3 #define DP_S_MAX 4 #define DP_S_MOD 5 #define DP_S_CONV 6 #define DP_S_DONE 7 /* format flags - Bits */ #define DP_F_MINUS (1 << 0) #define DP_F_PLUS (1 << 1) #define DP_F_SPACE (1 << 2) #define DP_F_NUM (1 << 3) #define DP_F_ZERO (1 << 4) #define DP_F_UP (1 << 5) #define DP_F_UNSIGNED (1 << 6) /* Conversion Flags */ #define DP_C_CHAR 1 #define DP_C_SHORT 2 #define DP_C_LONG 3 #define DP_C_LDOUBLE 4 #define DP_C_LLONG 5 
#define DP_C_SIZET 6

/* Chunk types: what kind of output a parsed pr_chunk produces. */
#define CNK_FMT_STR 0		/* literal run of the format string */
#define CNK_INT 1		/* %d / %i */
#define CNK_OCTAL 2		/* %o */
#define CNK_UINT 3		/* %u */
#define CNK_HEX 4		/* %x / %X */
#define CNK_FLOAT 5		/* %a %e %f %g and uppercase forms */
#define CNK_CHAR 6		/* %c */
#define CNK_STRING 7		/* %s */
#define CNK_PTR 8		/* %p */
#define CNK_NUM 9		/* %n */
#define CNK_PRCNT 10		/* %% */

/* Convert one ASCII digit character to its numeric value. */
#define char_to_int(p) ((p)- '0')
#ifndef MAX
#define MAX(p,q) (((p) >= (q)) ? (p) : (q))
#endif

/*
 * One parsed element of the format string: either a literal run
 * (CNK_FMT_STR, described by start/len offsets into the format string)
 * or a single conversion together with its flags, width and precision.
 * Chunks are kept in a singly linked list in parse order.
 */
struct pr_chunk {
	int type; /* chunk type */
	int num; /* parameter number */
	int min;	/* minimum field width (0 if none given) */
	int max;	/* precision (-1 if none given) */
	int flags;	/* DP_F_* conversion flags */
	int cflags;	/* DP_C_* length-modifier flags */
	int start;	/* CNK_FMT_STR: offset of literal text in format */
	int len;	/* CNK_FMT_STR: length of literal text */
	LLONG value;	/* fetched integer/char argument */
	LDOUBLE fvalue;	/* fetched floating-point argument */
	char *strvalue;	/* fetched string (%s) or pointer (%p) argument */
	void *pnum;	/* fetched %n destination pointer */
	struct pr_chunk *min_star;	/* chunk supplying a '*' width */
	struct pr_chunk *max_star;	/* chunk supplying a '*' precision */
	struct pr_chunk *next;		/* next chunk in parse order */
};

/* Index entry: all chunks that consume one positional parameter. */
struct pr_chunk_x {
	struct pr_chunk **chunks;
	int num;	/* how many chunks reference this parameter */
};

static int dopr(char *buffer, size_t maxlen, const char *format,
		va_list args_in);
static void fmtstr(char *buffer, size_t *currlen, size_t maxlen,
		   char *value, int flags, int min, int max);
static void fmtint(char *buffer, size_t *currlen, size_t maxlen,
		   LLONG value, int base, int min, int max, int flags);
static void fmtfp(char *buffer, size_t *currlen, size_t maxlen,
		  LDOUBLE fvalue, int min, int max, int flags);
static void dopr_outch(char *buffer, size_t *currlen, size_t maxlen,
		       char c);
static struct pr_chunk *new_chunk(void);
static int add_cnk_list_entry(struct pr_chunk_x **list,
			      int max_num, struct pr_chunk *chunk);

/*
 * Core formatter.  Works in three passes:
 *   1) parse 'format' into a linked list of pr_chunk via the state
 *      machine below, also building 'clist', an index of chunks per
 *      argument position, so that %m$ positional parameters are
 *      supported (parameters must be either all positioned or none);
 *   2) pull the arguments out of 'args_in' in positional order and
 *      store each one in every chunk that references it;
 *   3) walk the chunk list and render into 'buffer', never writing
 *      more than 'maxlen' bytes but still counting what the full
 *      output would need (the file's changelog says C99 semantics).
 *
 * Returns the length the complete output requires (excluding the
 * terminating NUL), or -1 on parse error / out of memory.
 */
static int dopr(char *buffer, size_t maxlen, const char *format, va_list args_in)
{
	char ch;
	int state;		/* current DP_S_* parser state */
	int pflag;		/* 1 once a %m$ positional param was seen */
	int pnum;		/* next implicit parameter number */
	int pfirst;		/* 1 until the first conversion is parsed */
	size_t currlen;
	va_list args;
	const char *base;
	struct pr_chunk *chunks = NULL;	/* head of the chunk list */
	struct pr_chunk *cnk = NULL;	/* chunk being parsed */
	struct pr_chunk_x *clist = NULL;/* per-parameter chunk index */
	int max_pos;			/* highest parameter number seen */
	int ret = -1;

	VA_COPY(args, args_in);

	state = DP_S_DEFAULT;
	pfirst = 1;
	pflag = 0;
	pnum = 0;

	max_pos = 0;
	base = format;
	ch = *format++;

	/* retrieve the string structure as chunks */
	while (state != DP_S_DONE) {
		if (ch == '\0')
			state = DP_S_DONE;

		switch(state) {
		case DP_S_DEFAULT:
			/* start a new chunk, linked after the previous one */
			if (cnk) {
				cnk->next = new_chunk();
				cnk = cnk->next;
			} else {
				cnk = new_chunk();
			}
			if (!cnk) goto done;
			if (!chunks) chunks = cnk;

			if (ch == '%') {
				state = DP_S_FLAGS;
				ch = *format++;
			} else {
				/* literal text: record its span, don't copy */
				cnk->type = CNK_FMT_STR;
				cnk->start = format - base -1;
				while ((ch != '\0') && (ch != '%')) ch = *format++;
				cnk->len = format - base - cnk->start -1;
			}
			break;
		case DP_S_FLAGS:
			switch (ch) {
			case '-':
				cnk->flags |= DP_F_MINUS;
				ch = *format++;
				break;
			case '+':
				cnk->flags |= DP_F_PLUS;
				ch = *format++;
				break;
			case ' ':
				cnk->flags |= DP_F_SPACE;
				ch = *format++;
				break;
			case '#':
				cnk->flags |= DP_F_NUM;
				ch = *format++;
				break;
			case '0':
				cnk->flags |= DP_F_ZERO;
				ch = *format++;
				break;
			case 'I':
				/* internationalization not supported yet */
				ch = *format++;
				break;
			default:
				state = DP_S_MIN;
				break;
			}
			break;
		case DP_S_MIN:
			/* field width: digits, a positional "m$", or '*' */
			if (isdigit((unsigned char)ch)) {
				cnk->min = 10 * cnk->min + char_to_int (ch);
				ch = *format++;
			} else if (ch == '$') {
				if (!pfirst && !pflag) {
					/* parameters must be all positioned or none */
					goto done;
				}
				if (pfirst) {
					pfirst = 0;
					pflag = 1;
				}
				if (cnk->min == 0) /* what ?? */
					goto done;
				/* the "width" digits were really the parameter number */
				cnk->num = cnk->min;
				cnk->min = 0;
				ch = *format++;
			} else if (ch == '*') {
				if (pfirst) pfirst = 0;
				/* width comes from an argument: synthesize an
				 * extra CNK_INT chunk to fetch it */
				cnk->min_star = new_chunk();
				if (!cnk->min_star) /* out of memory :-( */
					goto done;
				cnk->min_star->type = CNK_INT;
				if (pflag) {
					int num;
					ch = *format++;
					if (!isdigit((unsigned char)ch)) {
						/* parameters must be all positioned or none */
						goto done;
					}
					for (num = 0; isdigit((unsigned char)ch); ch = *format++) {
						num = 10 * num + char_to_int(ch);
					}
					cnk->min_star->num = num;
					if (ch != '$') /* what ?? */
						goto done;
				} else {
					cnk->min_star->num = ++pnum;
				}
				max_pos = add_cnk_list_entry(&clist, max_pos, cnk->min_star);
				if (max_pos == 0) /* out of memory :-( */
					goto done;
				ch = *format++;
				state = DP_S_DOT;
			} else {
				if (pfirst) pfirst = 0;
				state = DP_S_DOT;
			}
			break;
		case DP_S_DOT:
			if (ch == '.') {
				state = DP_S_MAX;
				ch = *format++;
			} else {
				state = DP_S_MOD;
			}
			break;
		case DP_S_MAX:
			/* precision: digits, a positional "m$", or '*' */
			if (isdigit((unsigned char)ch)) {
				if (cnk->max < 0) cnk->max = 0;
				cnk->max = 10 * cnk->max + char_to_int (ch);
				ch = *format++;
			} else if (ch == '$') {
				if (!pfirst && !pflag) {
					/* parameters must be all positioned or none */
					goto done;
				}
				if (cnk->max <= 0) /* what ?? */
					goto done;
				/* the "precision" digits were the parameter number */
				cnk->num = cnk->max;
				cnk->max = -1;
				ch = *format++;
			} else if (ch == '*') {
				/* precision comes from an argument */
				cnk->max_star = new_chunk();
				if (!cnk->max_star) /* out of memory :-( */
					goto done;
				cnk->max_star->type = CNK_INT;
				if (pflag) {
					int num;
					ch = *format++;
					if (!isdigit((unsigned char)ch)) {
						/* parameters must be all positioned or none */
						goto done;
					}
					for (num = 0; isdigit((unsigned char)ch); ch = *format++) {
						num = 10 * num + char_to_int(ch);
					}
					cnk->max_star->num = num;
					if (ch != '$') /* what ?? */
						goto done;
				} else {
					cnk->max_star->num = ++pnum;
				}
				max_pos = add_cnk_list_entry(&clist, max_pos, cnk->max_star);
				if (max_pos == 0) /* out of memory :-( */
					goto done;
				ch = *format++;
				state = DP_S_MOD;
			} else {
				state = DP_S_MOD;
			}
			break;
		case DP_S_MOD:
			/* length modifiers: hh h l ll L z */
			switch (ch) {
			case 'h':
				cnk->cflags = DP_C_SHORT;
				ch = *format++;
				if (ch == 'h') {
					cnk->cflags = DP_C_CHAR;
					ch = *format++;
				}
				break;
			case 'l':
				cnk->cflags = DP_C_LONG;
				ch = *format++;
				if (ch == 'l') {	/* It's a long long */
					cnk->cflags = DP_C_LLONG;
					ch = *format++;
				}
				break;
			case 'L':
				cnk->cflags = DP_C_LDOUBLE;
				ch = *format++;
				break;
			case 'z':
				cnk->cflags = DP_C_SIZET;
				ch = *format++;
				break;
			default:
				break;
			}
			state = DP_S_CONV;
			break;
		case DP_S_CONV:
			/* assign an implicit parameter number if no %m$ given,
			 * then register this chunk under that number */
			if (cnk->num == 0) cnk->num = ++pnum;
			max_pos = add_cnk_list_entry(&clist, max_pos, cnk);
			if (max_pos == 0) /* out of memory :-( */
				goto done;
			switch (ch) {
			case 'd':
			case 'i':
				cnk->type = CNK_INT;
				break;
			case 'o':
				cnk->type = CNK_OCTAL;
				cnk->flags |= DP_F_UNSIGNED;
				break;
			case 'u':
				cnk->type = CNK_UINT;
				cnk->flags |= DP_F_UNSIGNED;
				break;
			case 'X':
				cnk->flags |= DP_F_UP;
				/* fall through to lowercase hex */
			case 'x':
				cnk->type = CNK_HEX;
				cnk->flags |= DP_F_UNSIGNED;
				break;
			case 'A':
				/* hex float not supported yet */
			case 'E':
			case 'G':
			case 'F':
				cnk->flags |= DP_F_UP;
				/* fall through to lowercase float forms */
			case 'a':
				/* hex float not supported yet */
			case 'e':
			case 'f':
			case 'g':
				cnk->type = CNK_FLOAT;
				break;
			case 'c':
				cnk->type = CNK_CHAR;
				break;
			case 's':
				cnk->type = CNK_STRING;
				break;
			case 'p':
				cnk->type = CNK_PTR;
				cnk->flags |= DP_F_UNSIGNED;
				break;
			case 'n':
				cnk->type = CNK_NUM;
				break;
			case '%':
				cnk->type = CNK_PRCNT;
				break;
			default:
				/* Unknown, bail out*/
				goto done;
			}
			ch = *format++;
			state = DP_S_DEFAULT;
			break;
		case DP_S_DONE:
			break;
		default:
			/* hmm? */
			break; /* some picky compilers need this */
		}
	}

	/* retrieve the format arguments */
	for (pnum = 0; pnum < max_pos; pnum++) {
		int i;
		if (clist[pnum].num == 0) {
			/* ignoring a parameter should not be permitted
			 * all parameters must be matched at least once
			 * BUT seem some system ignore this rule ...
			 * at least my glibc based system does --SSS
			 */
#ifdef DEBUG_SNPRINTF
			printf("parameter at position %d not used\n", pnum+1);
#endif
			/* eat the parameter */
			va_arg (args, int);
			continue;
		}
		for (i = 1; i < clist[pnum].num; i++) {
			if (clist[pnum].chunks[0]->type != clist[pnum].chunks[i]->type) {
				/* nooo noo no!
				 * all the references to a parameter
				 * must be of the same type
				 */
				goto done;
			}
		}
		cnk = clist[pnum].chunks[0];
		switch (cnk->type) {
		case CNK_INT:
			if (cnk->cflags == DP_C_SHORT)
				cnk->value = va_arg (args, int);
			else if (cnk->cflags == DP_C_LONG)
				cnk->value = va_arg (args, long int);
			else if (cnk->cflags == DP_C_LLONG)
				cnk->value = va_arg (args, LLONG);
			else if (cnk->cflags == DP_C_SIZET)
				cnk->value = va_arg (args, ssize_t);
			else
				cnk->value = va_arg (args, int);
			/* propagate the value to all chunks sharing the arg */
			for (i = 1; i < clist[pnum].num; i++) {
				clist[pnum].chunks[i]->value = cnk->value;
			}
			break;
		case CNK_OCTAL:
		case CNK_UINT:
		case CNK_HEX:
			if (cnk->cflags == DP_C_SHORT)
				cnk->value = va_arg (args, unsigned int);
			else if (cnk->cflags == DP_C_LONG)
				cnk->value = (unsigned long int)va_arg (args, unsigned long int);
			else if (cnk->cflags == DP_C_LLONG)
				cnk->value = (LLONG)va_arg (args, unsigned LLONG);
			else if (cnk->cflags == DP_C_SIZET)
				cnk->value = (size_t)va_arg (args, size_t);
			else
				cnk->value = (unsigned int)va_arg (args, unsigned int);
			for (i = 1; i < clist[pnum].num; i++) {
				clist[pnum].chunks[i]->value = cnk->value;
			}
			break;
		case CNK_FLOAT:
			if (cnk->cflags == DP_C_LDOUBLE)
				cnk->fvalue = va_arg (args, LDOUBLE);
			else
				cnk->fvalue = va_arg (args, double);
			for (i = 1; i < clist[pnum].num; i++) {
				clist[pnum].chunks[i]->fvalue = cnk->fvalue;
			}
			break;
		case CNK_CHAR:
			/* chars are promoted to int through varargs */
			cnk->value = va_arg (args, int);
			for (i = 1; i < clist[pnum].num; i++) {
				clist[pnum].chunks[i]->value = cnk->value;
			}
			break;
		case CNK_STRING:
			cnk->strvalue = va_arg (args, char *);
			if (!cnk->strvalue) cnk->strvalue = "(NULL)";
			for (i = 1; i < clist[pnum].num; i++) {
				clist[pnum].chunks[i]->strvalue = cnk->strvalue;
			}
			break;
		case CNK_PTR:
			cnk->strvalue = va_arg (args, void *);
			for (i = 1; i < clist[pnum].num; i++) {
				clist[pnum].chunks[i]->strvalue = cnk->strvalue;
			}
			break;
		case CNK_NUM:
			if (cnk->cflags == DP_C_CHAR)
				cnk->pnum = va_arg (args, char *);
			else if (cnk->cflags == DP_C_SHORT)
				cnk->pnum = va_arg (args, short int *);
			else if (cnk->cflags == DP_C_LONG)
				cnk->pnum = va_arg (args, long int *);
			else if (cnk->cflags == DP_C_LLONG)
				cnk->pnum = va_arg (args, LLONG *);
			else if (cnk->cflags == DP_C_SIZET)
				cnk->pnum = va_arg (args, ssize_t *);
			else
				cnk->pnum = va_arg (args, int *);
			for (i = 1; i < clist[pnum].num; i++) {
				clist[pnum].chunks[i]->pnum = cnk->pnum;
			}
			break;
		case CNK_PRCNT:
			/* %% consumes no argument */
			break;
		default:
			/* what ?? */
			goto done;
		}
	}

	/* print out the actual string from chunks */
	currlen = 0;
	cnk = chunks;
	while (cnk) {
		int len, min, max;
		/* resolve '*' width/precision from their helper chunks */
		if (cnk->min_star) min = cnk->min_star->value;
		else min = cnk->min;
		if (cnk->max_star) max = cnk->max_star->value;
		else max = cnk->max;
		switch (cnk->type) {
		case CNK_FMT_STR:
			/* copy as much literal text as fits; currlen still
			 * advances by the full length (C99 return value) */
			if (maxlen != 0 && maxlen > currlen) {
				if (maxlen > (currlen + cnk->len)) len = cnk->len;
				else len = maxlen - currlen;
				memcpy(&(buffer[currlen]), &(base[cnk->start]), len);
			}
			currlen += cnk->len;
			break;
		case CNK_INT:
		case CNK_UINT:
			fmtint (buffer, &currlen, maxlen, cnk->value, 10, min, max, cnk->flags);
			break;
		case CNK_OCTAL:
			fmtint (buffer, &currlen, maxlen, cnk->value, 8, min, max, cnk->flags);
			break;
		case CNK_HEX:
			fmtint (buffer, &currlen, maxlen, cnk->value, 16, min, max, cnk->flags);
			break;
		case CNK_FLOAT:
			fmtfp (buffer, &currlen, maxlen, cnk->fvalue, min, max, cnk->flags);
			break;
		case CNK_CHAR:
			dopr_outch (buffer, &currlen, maxlen, cnk->value);
			break;
		case CNK_STRING:
			if (max == -1) {
				max = strlen(cnk->strvalue);
			}
			fmtstr (buffer, &currlen, maxlen, cnk->strvalue, cnk->flags, min, max);
			break;
		case CNK_PTR:
			/* NOTE(review): casting the pointer through (long)
			 * truncates on LLP64 platforms (e.g. 64-bit Windows)
			 * -- verify against the target platforms */
			fmtint (buffer, &currlen, maxlen, (long)(cnk->strvalue), 16, min, max, cnk->flags);
			break;
		case CNK_NUM:
			/* %n: store the output length so far */
			if (cnk->cflags == DP_C_CHAR)
				*((char *)(cnk->pnum)) = (char)currlen;
			else if (cnk->cflags == DP_C_SHORT)
				*((short int *)(cnk->pnum)) = (short int)currlen;
			else if (cnk->cflags == DP_C_LONG)
				*((long int *)(cnk->pnum)) = (long int)currlen;
			else if (cnk->cflags == DP_C_LLONG)
				*((LLONG *)(cnk->pnum)) = (LLONG)currlen;
			else if (cnk->cflags == DP_C_SIZET)
				*((ssize_t *)(cnk->pnum)) = (ssize_t)currlen;
			else
				*((int *)(cnk->pnum)) = (int)currlen;
			break;
		case CNK_PRCNT:
			dopr_outch (buffer, &currlen, maxlen, '%');
			break;
		default:
			/* what ?? */
			goto done;
		}
		cnk = cnk->next;
	}
	/* NUL-terminate within the caller's buffer, if there is one */
	if (maxlen != 0) {
		if (currlen < maxlen - 1)
			buffer[currlen] = '\0';
		else if (maxlen > 0)
			buffer[maxlen - 1] = '\0';
	}
	ret = currlen;

done:
	/* free the chunk list and the per-parameter index */
	va_end(args);
	while (chunks) {
		cnk = chunks->next;
		free(chunks);
		chunks = cnk;
	}
	if (clist) {
		for (pnum = 0; pnum < max_pos; pnum++) {
			if (clist[pnum].chunks) free(clist[pnum].chunks);
		}
		free(clist);
	}
	return ret;
}

/*
 * Emit string 'value' into 'buffer', honouring the field width ('min',
 * space-padded, left-justified with DP_F_MINUS) and the precision
 * ('max', the maximum number of characters taken from 'value').
 */
static void fmtstr(char *buffer, size_t *currlen, size_t maxlen,
		   char *value, int flags, int min, int max)
{
	int padlen, strln;     /* amount to pad */
	int cnt = 0;

#ifdef DEBUG_SNPRINTF
	printf("fmtstr min=%d max=%d s=[%s]\n", min, max, value);
#endif
	if (value == 0) {
		/* NOTE(review): this fallback literal looks truncated by the
		 * archive extraction (upstream snprintf.c substitutes a
		 * visible placeholder here) -- verify against pristine
		 * source before relying on this text */
		value = "";
	}

	for (strln = 0; strln < max && value[strln]; ++strln); /* strlen */
	padlen = min - strln;
	if (padlen < 0)
		padlen = 0;
	if (flags & DP_F_MINUS)
		padlen = -padlen; /* Left Justify */

	while (padlen > 0) {
		dopr_outch (buffer, currlen, maxlen, ' ');
		--padlen;
	}
	while (*value && (cnt < max)) {
		dopr_outch (buffer, currlen, maxlen, *value++);
		++cnt;
	}
	while (padlen < 0) {
		dopr_outch (buffer, currlen, maxlen, ' ');
		++padlen;
	}
}

/* Have to handle DP_F_NUM (ie 0x and 0 alternates) */
/*
 * Emit integer 'value' in the given 'base' (8, 10 or 16).  'min' is the
 * field width (space- or, with DP_F_ZERO, zero-padded; left-justified
 * with DP_F_MINUS), 'max' the minimum number of digits (zero-padded).
 * DP_F_UNSIGNED treats 'value' as unsigned; DP_F_UP selects uppercase
 * hex digits.  Note: per the comment above, the '#' alternate form
 * (0x/0 prefixes) is not implemented here.
 */
static void fmtint(char *buffer, size_t *currlen, size_t maxlen,
		   LLONG value, int base, int min, int max, int flags)
{
	int signvalue = 0;
	unsigned LLONG uvalue;
	char convert[20];	/* digits accumulated least-significant first */
	int place = 0;
	int spadlen = 0; /* amount to space pad */
	int zpadlen = 0; /* amount to zero pad */
	int caps = 0;

	if (max < 0)
		max = 0;

	uvalue = value;

	if(!(flags & DP_F_UNSIGNED)) {
		if( value < 0 ) {
			signvalue = '-';
			uvalue = -value;
		} else {
			if (flags & DP_F_PLUS)  /* Do a sign (+/i) */
				signvalue = '+';
			else if (flags & DP_F_SPACE)
				signvalue = ' ';
		}
	}

	if (flags & DP_F_UP) caps = 1; /* Should characters be upper case? */

	do {
		convert[place++] =
			(caps? "0123456789ABCDEF":"0123456789abcdef")
			[uvalue % (unsigned)base ];
		uvalue = (uvalue / (unsigned)base );
	} while(uvalue && (place < 20));
	if (place == 20) place--;	/* guard against buffer overrun */
	convert[place] = 0;

	zpadlen = max - place;
	spadlen = min - MAX (max, place) - (signvalue ? 1 : 0);
	if (zpadlen < 0) zpadlen = 0;
	if (spadlen < 0) spadlen = 0;
	if (flags & DP_F_ZERO) {
		/* '0' flag converts the space padding to zero padding */
		zpadlen = MAX(zpadlen, spadlen);
		spadlen = 0;
	}
	if (flags & DP_F_MINUS)
		spadlen = -spadlen; /* Left Justifty */

#ifdef DEBUG_SNPRINTF
	printf("zpad: %d, spad: %d, min: %d, max: %d, place: %d\n",
	       zpadlen, spadlen, min, max, place);
#endif

	/* Spaces */
	while (spadlen > 0) {
		dopr_outch (buffer, currlen, maxlen, ' ');
		--spadlen;
	}

	/* Sign */
	if (signvalue)
		dopr_outch (buffer, currlen, maxlen, signvalue);

	/* Zeros */
	if (zpadlen > 0) {
		while (zpadlen > 0) {
			dopr_outch (buffer, currlen, maxlen, '0');
			--zpadlen;
		}
	}

	/* Digits (stored in reverse, so emit from the top down) */
	while (place > 0)
		dopr_outch (buffer, currlen, maxlen, convert[--place]);

	/* Left Justified spaces */
	while (spadlen < 0) {
		dopr_outch (buffer, currlen, maxlen, ' ');
		++spadlen;
	}
}

/* Absolute value without pulling in the math library. */
static LDOUBLE abs_val(LDOUBLE value)
{
	LDOUBLE result = value;

	if (value < 0)
		result = -value;

	return result;
}

/* 10^exp by repeated multiplication (avoids libm's pow()). */
static LDOUBLE POW10(int exp)
{
	LDOUBLE result = 1;

	while (exp) {
		result *= 10;
		exp--;
	}

	return result;
}

/* Round to the nearest integer (halves round up). */
static LLONG ROUND(LDOUBLE value)
{
	LLONG intpart;

	intpart = (LLONG)value;
	value = value - intpart;
	if (value >= 0.5) intpart++;

	return intpart;
}

/* a replacement for modf that
doesn't need the math library. Should be portable, but slow */ static double my_modf(double x0, double *iptr) { int i; LLONG l=0; double x = x0; double f = 1.0; for (i=0;i<100;i++) { l = (long)x; if (l <= (x+1) && l >= (x-1)) break; x *= 0.1; f *= 10.0; } if (i == 100) { /* yikes! the number is beyond what we can handle. What do we do? */ (*iptr) = 0; return 0; } if (i != 0) { double i2; double ret; ret = my_modf(x0-l*f, &i2); (*iptr) = l*f + i2; return ret; } (*iptr) = l; return x - (*iptr); } static void fmtfp (char *buffer, size_t *currlen, size_t maxlen, LDOUBLE fvalue, int min, int max, int flags) { int signvalue = 0; double ufvalue; char iconvert[311]; char fconvert[311]; int iplace = 0; int fplace = 0; int padlen = 0; /* amount to pad */ int zpadlen = 0; int caps = 0; int idx; double intpart; double fracpart; double temp; /* * AIX manpage says the default is 0, but Solaris says the default * is 6, and sprintf on AIX defaults to 6 */ if (max < 0) max = 6; ufvalue = abs_val (fvalue); if (fvalue < 0) { signvalue = '-'; } else { if (flags & DP_F_PLUS) { /* Do a sign (+/i) */ signvalue = '+'; } else { if (flags & DP_F_SPACE) signvalue = ' '; } } #if 0 if (flags & DP_F_UP) caps = 1; /* Should characters be upper case? */ #endif #if 0 if (max == 0) ufvalue += 0.5; /* if max = 0 we must round */ #endif /* * Sorry, we only support 9 digits past the decimal because of our * conversion method */ if (max > 9) max = 9; /* We "cheat" by converting the fractional part to integer by * multiplying by a factor of 10 */ temp = ufvalue; my_modf(temp, &intpart); fracpart = ROUND((POW10(max)) * (ufvalue - intpart)); if (fracpart >= POW10(max)) { intpart++; fracpart -= POW10(max); } /* Convert integer part */ do { temp = intpart*0.1; my_modf(temp, &intpart); idx = (int) ((temp -intpart +0.05)* 10.0); /* idx = (int) (((double)(temp*0.1) -intpart +0.05) *10.0); */ /* printf ("%llf, %f, %x\n", temp, intpart, idx); */ iconvert[iplace++] = (caps? 
"0123456789ABCDEF":"0123456789abcdef")[idx]; } while (intpart && (iplace < 311)); if (iplace == 311) iplace--; iconvert[iplace] = 0; /* Convert fractional part */ if (fracpart) { do { temp = fracpart*0.1; my_modf(temp, &fracpart); idx = (int) ((temp -fracpart +0.05)* 10.0); /* idx = (int) ((((temp/10) -fracpart) +0.05) *10); */ /* printf ("%lf, %lf, %ld\n", temp, fracpart, idx ); */ fconvert[fplace++] = (caps? "0123456789ABCDEF":"0123456789abcdef")[idx]; } while(fracpart && (fplace < 311)); if (fplace == 311) fplace--; } fconvert[fplace] = 0; /* -1 for decimal point, another -1 if we are printing a sign */ padlen = min - iplace - max - 1 - ((signvalue) ? 1 : 0); zpadlen = max - fplace; if (zpadlen < 0) zpadlen = 0; if (padlen < 0) padlen = 0; if (flags & DP_F_MINUS) padlen = -padlen; /* Left Justifty */ if ((flags & DP_F_ZERO) && (padlen > 0)) { if (signvalue) { dopr_outch (buffer, currlen, maxlen, signvalue); --padlen; signvalue = 0; } while (padlen > 0) { dopr_outch (buffer, currlen, maxlen, '0'); --padlen; } } while (padlen > 0) { dopr_outch (buffer, currlen, maxlen, ' '); --padlen; } if (signvalue) dopr_outch (buffer, currlen, maxlen, signvalue); while (iplace > 0) dopr_outch (buffer, currlen, maxlen, iconvert[--iplace]); #ifdef DEBUG_SNPRINTF printf("fmtfp: fplace=%d zpadlen=%d\n", fplace, zpadlen); #endif /* * Decimal point. This should probably use locale to find the correct * char to print out. 
*/ if (max > 0) { dopr_outch (buffer, currlen, maxlen, '.'); while (zpadlen > 0) { dopr_outch (buffer, currlen, maxlen, '0'); --zpadlen; } while (fplace > 0) dopr_outch (buffer, currlen, maxlen, fconvert[--fplace]); } while (padlen < 0) { dopr_outch (buffer, currlen, maxlen, ' '); ++padlen; } } static void dopr_outch(char *buffer, size_t *currlen, size_t maxlen, char c) { if (*currlen < maxlen) { buffer[(*currlen)] = c; } (*currlen)++; } static struct pr_chunk *new_chunk(void) { struct pr_chunk *new_c = (struct pr_chunk *)malloc(sizeof(struct pr_chunk)); if (!new_c) return NULL; new_c->type = 0; new_c->num = 0; new_c->min = 0; new_c->min_star = NULL; new_c->max = -1; new_c->max_star = NULL; new_c->flags = 0; new_c->cflags = 0; new_c->start = 0; new_c->len = 0; new_c->value = 0; new_c->fvalue = 0; new_c->strvalue = NULL; new_c->pnum = NULL; new_c->next = NULL; return new_c; } static int add_cnk_list_entry(struct pr_chunk_x **list, int max_num, struct pr_chunk *chunk) { struct pr_chunk_x *l; struct pr_chunk **c; int max; int cnum; int i, pos; if (chunk->num > max_num) { max = chunk->num; if (*list == NULL) { l = (struct pr_chunk_x *)malloc(sizeof(struct pr_chunk_x) * max); pos = 0; } else { l = (struct pr_chunk_x *)realloc(*list, sizeof(struct pr_chunk_x) * max); pos = max_num; } if (l == NULL) { for (i = 0; i < max; i++) { if ((*list)[i].chunks) free((*list)[i].chunks); } return 0; } for (i = pos; i < max; i++) { l[i].chunks = NULL; l[i].num = 0; } } else { l = *list; max = max_num; } i = chunk->num - 1; cnum = l[i].num + 1; if (l[i].chunks == NULL) { c = (struct pr_chunk **)malloc(sizeof(struct pr_chunk *) * cnum); } else { c = (struct pr_chunk **)realloc(l[i].chunks, sizeof(struct pr_chunk *) * cnum); } if (c == NULL) { for (i = 0; i < max; i++) { if (l[i].chunks) free(l[i].chunks); } return 0; } c[l[i].num] = chunk; l[i].chunks = c; l[i].num = cnum; *list = l; return max; } int rep_vsnprintf (char *str, size_t count, const char *fmt, va_list args) { return 
dopr(str, count, fmt, args); } #endif /* yes this really must be a ||. Don't muck with this (tridge) * * The logic for these two is that we need our own definition if the * OS *either* has no definition of *sprintf, or if it does have one * that doesn't work properly according to the autoconf test. */ #if !defined(HAVE_SNPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_snprintf(char *str,size_t count,const char *fmt,...) { size_t ret; va_list ap; va_start(ap, fmt); ret = vsnprintf(str, count, fmt, ap); va_end(ap); return ret; } #endif #ifndef HAVE_C99_VSNPRINTF int rep_printf(const char *fmt, ...) { va_list ap; int ret; char *s; s = NULL; va_start(ap, fmt); ret = vasprintf(&s, fmt, ap); va_end(ap); if (s) { fwrite(s, 1, strlen(s), stdout); } free(s); return ret; } #endif #ifndef HAVE_C99_VSNPRINTF int rep_fprintf(FILE *stream, const char *fmt, ...) { va_list ap; int ret; char *s; s = NULL; va_start(ap, fmt); ret = vasprintf(&s, fmt, ap); va_end(ap); if (s) { fwrite(s, 1, strlen(s), stream); } free(s); return ret; } #endif #endif #if !defined(HAVE_VASPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_vasprintf(char **ptr, const char *format, va_list ap) { int ret; va_list ap2; VA_COPY(ap2, ap); ret = vsnprintf(NULL, 0, format, ap2); va_end(ap2); if (ret < 0) return ret; (*ptr) = (char *)malloc(ret+1); if (!*ptr) return -1; VA_COPY(ap2, ap); ret = vsnprintf(*ptr, ret+1, format, ap2); va_end(ap2); return ret; } #endif #if !defined(HAVE_ASPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_asprintf(char **ptr, const char *format, ...) 
{ va_list ap; int ret; *ptr = NULL; va_start(ap, format); ret = vasprintf(ptr, format, ap); va_end(ap); return ret; } #endif #ifdef TEST_SNPRINTF int sprintf(char *str,const char *fmt,...); int printf(const char *fmt,...); int main (void) { char buf1[1024]; char buf2[1024]; char *buf3; char *fp_fmt[] = { "%1.1f", "%-1.5f", "%1.5f", "%123.9f", "%10.5f", "% 10.5f", "%+22.9f", "%+4.9f", "%01.3f", "%4f", "%3.1f", "%3.2f", "%.0f", "%f", "%-8.8f", "%-9.9f", NULL }; double fp_nums[] = { 6442452944.1234, -1.5, 134.21, 91340.2, 341.1234, 203.9, 0.96, 0.996, 0.9996, 1.996, 4.136, 5.030201, 0.00205, /* END LIST */ 0}; char *int_fmt[] = { "%-1.5d", "%1.5d", "%123.9d", "%5.5d", "%10.5d", "% 10.5d", "%+22.33d", "%01.3d", "%4d", "%d", NULL }; long int_nums[] = { -1, 134, 91340, 341, 0203, 1234567890, 0}; char *str_fmt[] = { "%10.5s", "%-10.5s", "%5.10s", "%-5.10s", "%10.1s", "%0.10s", "%10.0s", "%1.10s", "%s", "%.1s", "%.10s", "%10s", NULL }; char *str_vals[] = {"hello", "a", "", "a longer string", NULL}; #ifdef HAVE_LONG_LONG char *ll_fmt[] = { "%llu", NULL }; LLONG ll_nums[] = { 134, 91340, 341, 0203, 1234567890, 128006186140000000LL, 0}; #endif int x, y; int fail = 0; int num = 0; int l1, l2; char *ss_fmt[] = { "%zd", "%zu", NULL }; size_t ss_nums[] = {134, 91340, 123456789, 0203, 1234567890, 0}; printf ("Testing snprintf format codes against system sprintf...\n"); for (x = 0; fp_fmt[x] ; x++) { for (y = 0; fp_nums[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), fp_fmt[x], fp_nums[y]); l2 = sprintf (buf2, fp_fmt[x], fp_nums[y]); buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", fp_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } for (x = 0; int_fmt[x] ; x++) { for (y = 0; int_nums[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), int_fmt[x], int_nums[y]); l2 = sprintf (buf2, int_fmt[x], int_nums[y]); 
buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", int_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } for (x = 0; str_fmt[x] ; x++) { for (y = 0; str_vals[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), str_fmt[x], str_vals[y]); l2 = sprintf (buf2, str_fmt[x], str_vals[y]); buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", str_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } #ifdef HAVE_LONG_LONG for (x = 0; ll_fmt[x] ; x++) { for (y = 0; ll_nums[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), ll_fmt[x], ll_nums[y]); l2 = sprintf (buf2, ll_fmt[x], ll_nums[y]); buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", ll_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } #endif #define BUFSZ 2048 buf1[0] = buf2[0] = '\0'; if ((buf3 = malloc(BUFSZ)) == NULL) { fail++; } else { num++; memset(buf3, 'a', BUFSZ); snprintf(buf1, sizeof(buf1), "%.*s", 1, buf3); buf1[1023] = '\0'; if (strcmp(buf1, "a") != 0) { printf("length limit buf1 '%s' expected 'a'\n", buf1); fail++; } } buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), "%4$*1$d %2$s %3$*1$.*1$f", 3, "pos test", 12.3456, 9); l2 = sprintf(buf2, "%4$*1$d %2$s %3$*1$.*1$f", 3, "pos test", 12.3456, 9); buf1[1023] = buf2[1023] = '\0'; if (strcmp(buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", "%4$*1$d %2$s %3$*1$.*1$f", l1, buf1, l2, buf2); fail++; } buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), "%4$*4$d %2$s %3$*4$.*4$f", 3, "pos test", 12.3456, 9); l2 = sprintf(buf2, "%4$*4$d %2$s %3$*4$.*4$f", 3, "pos test", 12.3456, 9); buf1[1023] = 
buf2[1023] = '\0'; if (strcmp(buf1, buf2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", "%4$*1$d %2$s %3$*1$.*1$f", l1, buf1, l2, buf2); fail++; } for (x = 0; ss_fmt[x] ; x++) { for (y = 0; ss_nums[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), ss_fmt[x], ss_nums[y]); l2 = sprintf (buf2, ss_fmt[x], ss_nums[y]); buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", ss_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } #if 0 buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), "%lld", (LLONG)1234567890); l2 = sprintf(buf2, "%lld", (LLONG)1234567890); buf1[1023] = buf2[1023] = '\0'; if (strcmp(buf1, buf2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", "%lld", l1, buf1, l2, buf2); fail++; } buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), "%Lf", (LDOUBLE)890.1234567890123); l2 = sprintf(buf2, "%Lf", (LDOUBLE)890.1234567890123); buf1[1023] = buf2[1023] = '\0'; if (strcmp(buf1, buf2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", "%Lf", l1, buf1, l2, buf2); fail++; } #endif printf ("%d tests failed out of %d.\n", fail, num); printf("seeing how many digits we support\n"); { double v0 = 0.12345678901234567890123456789012345678901; for (x=0; x<100; x++) { double p = pow(10, x); double r = v0*p; snprintf(buf1, sizeof(buf1), "%1.1f", r); sprintf(buf2, "%1.1f", r); if (strcmp(buf1, buf2)) { printf("we seem to support %d digits\n", x-1); break; } } } return 0; } #endif /* TEST_SNPRINTF */ ntdb-1.0/lib/replace/socket.c000066400000000000000000000023071224151530700161200ustar00rootroot00000000000000/* * Unix SMB/CIFS implementation. * * Dummy replacements for socket functions. * * Copyright (C) Michael Adam 2008 * * ** NOTE! 
The following LGPL license applies to the replace
 * ** library. This does NOT imply that all of Samba is released
 * ** under the LGPL
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 3 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "replace.h"
#include "system/network.h"

/* Dummy connect() for systems without sockets: always fails, ENOSYS. */
int rep_connect(int sockfd, const struct sockaddr *serv_addr, socklen_t addrlen)
{
	errno = ENOSYS;
	return -1;
}

/* Dummy gethostbyname() for systems without sockets: always fails, ENOSYS. */
struct hostent *rep_gethostbyname(const char *name)
{
	errno = ENOSYS;
	return NULL;
}
ntdb-1.0/lib/replace/socketpair.c000066400000000000000000000025011224151530700167700ustar00rootroot00000000000000/*
 * Unix SMB/CIFS implementation.
 * replacement routines for broken systems
 * Copyright (C) Jelmer Vernooij 2006
 * Copyright (C) Michael Adam 2008
 *
 * ** NOTE! The following LGPL license applies to the replace
 * ** library. This does NOT imply that all of Samba is released
 * ** under the LGPL
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 3 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
* * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "replace.h"
#include "system/network.h"

/*
 * Minimal socketpair() emulation for AF_UNIX/SOCK_STREAM built on
 * pipe().  Any other domain, protocol or type is rejected with the
 * matching errno.  NOTE(review): a pipe is unidirectional, while a
 * real socketpair is bidirectional — callers may only rely on one-way
 * traffic through the returned descriptors.
 */
int rep_socketpair(int d, int type, int protocol, int sv[2])
{
	if (d != AF_UNIX) {
		errno = EAFNOSUPPORT;
		return -1;
	}

	if (protocol != 0) {
		errno = EPROTONOSUPPORT;
		return -1;
	}

	if (type != SOCK_STREAM) {
		errno = EOPNOTSUPP;
		return -1;
	}

	return pipe(sv);
}
ntdb-1.0/lib/replace/strptime.c000066400000000000000000000567551224151530700165140ustar00rootroot00000000000000/* Convert a string representation of time to a time value.
   Copyright (C) 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper , 1996.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   see <http://www.gnu.org/licenses/>.  */

/* XXX This version of the implementation is not really complete.
   Some of the fields cannot add information alone.  But if seeing
   some of them in the same format (such as year, week and weekday)
   this is enough information for determining the date.  */

#include "replace.h"
#include "system/locale.h"
#include "system/time.h"

/* __P() wraps prototype argument lists for pre-ANSI compilers. */
#ifndef __P
# if defined (__GNUC__) || (defined (__STDC__) && __STDC__)
#  define __P(args) args
# else
#  define __P(args) ()
# endif  /* GCC.  */
#endif  /* Not __P.  */

#if ! HAVE_LOCALTIME_R && !
defined localtime_r # ifdef _LIBC # define localtime_r __localtime_r # else /* Approximate localtime_r as best we can in its absence. */ # define localtime_r my_localtime_r static struct tm *localtime_r __P ((const time_t *, struct tm *)); static struct tm * localtime_r (t, tp) const time_t *t; struct tm *tp; { struct tm *l = localtime (t); if (! l) return 0; *tp = *l; return tp; } # endif /* ! _LIBC */ #endif /* ! HAVE_LOCALTIME_R && ! defined (localtime_r) */ #define match_char(ch1, ch2) if (ch1 != ch2) return NULL #if defined __GNUC__ && __GNUC__ >= 2 # define match_string(cs1, s2) \ ({ size_t len = strlen (cs1); \ int result = strncasecmp ((cs1), (s2), len) == 0; \ if (result) (s2) += len; \ result; }) #else /* Oh come on. Get a reasonable compiler. */ # define match_string(cs1, s2) \ (strncasecmp ((cs1), (s2), strlen (cs1)) ? 0 : ((s2) += strlen (cs1), 1)) #endif /* We intentionally do not use isdigit() for testing because this will lead to problems with the wide character version. */ #define get_number(from, to, n) \ do { \ int __n = n; \ val = 0; \ while (*rp == ' ') \ ++rp; \ if (*rp < '0' || *rp > '9') \ return NULL; \ do { \ val *= 10; \ val += *rp++ - '0'; \ } while (--__n > 0 && val * 10 <= to && *rp >= '0' && *rp <= '9'); \ if (val < from || val > to) \ return NULL; \ } while (0) #ifdef _NL_CURRENT # define get_alt_number(from, to, n) \ ({ \ __label__ do_normal; \ if (*decided != raw) \ { \ const char *alts = _NL_CURRENT (LC_TIME, ALT_DIGITS); \ int __n = n; \ int any = 0; \ while (*rp == ' ') \ ++rp; \ val = 0; \ do { \ val *= 10; \ while (*alts != '\0') \ { \ size_t len = strlen (alts); \ if (strncasecmp (alts, rp, len) == 0) \ break; \ alts += len + 1; \ ++val; \ } \ if (*alts == '\0') \ { \ if (*decided == not && ! any) \ goto do_normal; \ /* If we haven't read anything it's an error. */ \ if (! any) \ return NULL; \ /* Correct the premature multiplication. 
*/ \ val /= 10; \ break; \ } \ else \ *decided = loc; \ } while (--__n > 0 && val * 10 <= to); \ if (val < from || val > to) \ return NULL; \ } \ else \ { \ do_normal: \ get_number (from, to, n); \ } \ 0; \ }) #else # define get_alt_number(from, to, n) \ /* We don't have the alternate representation. */ \ get_number(from, to, n) #endif #define recursive(new_fmt) \ (*(new_fmt) != '\0' \ && (rp = strptime_internal (rp, (new_fmt), tm, decided, era_cnt)) != NULL) #ifdef _LIBC /* This is defined in locale/C-time.c in the GNU libc. */ extern const struct locale_data _nl_C_LC_TIME; extern const unsigned short int __mon_yday[2][13]; # define weekday_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (DAY_1)].string) # define ab_weekday_name \ (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (ABDAY_1)].string) # define month_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (MON_1)].string) # define ab_month_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (ABMON_1)].string) # define HERE_D_T_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (D_T_FMT)].string) # define HERE_D_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (D_FMT)].string) # define HERE_AM_STR (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (AM_STR)].string) # define HERE_PM_STR (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (PM_STR)].string) # define HERE_T_FMT_AMPM \ (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (T_FMT_AMPM)].string) # define HERE_T_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (T_FMT)].string) # define strncasecmp(s1, s2, n) __strncasecmp (s1, s2, n) #else static char const weekday_name[][10] = { "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday" }; static char const ab_weekday_name[][4] = { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" }; static char const month_name[][10] = { "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December" }; static char const ab_month_name[][4] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" }; # define 
HERE_D_T_FMT "%a %b %e %H:%M:%S %Y"
# define HERE_D_FMT "%m/%d/%y"
# define HERE_AM_STR "AM"
# define HERE_PM_STR "PM"
# define HERE_T_FMT_AMPM "%I:%M:%S %p"
# define HERE_T_FMT "%H:%M:%S"

/* Cumulative days before the start of each month,
   for normal and leap years. */
static const unsigned short int __mon_yday[2][13] =
  {
    /* Normal years.  */
    { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
    /* Leap years.  */
    { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
  };
#endif

/* Status of lookup: do we use the locale data or the raw data?  */
enum locale_status { not, loc, raw };

#ifndef __isleap
/* Nonzero if YEAR is a leap year (every 4 years,
   except every 100th isn't, and every 400th is).  */
# define __isleap(year)	\
  ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
#endif

/* Compute the day of the week (tm_wday) from the year, month and
   day-of-month fields already stored in *TM. */
static void
day_of_the_week (struct tm *tm)
{
  /* We know that January 1st 1970 was a Thursday (= 4).  Compute the
     difference between this date and the one in TM and so determine
     the weekday.  */
  int corr_year = 1900 + tm->tm_year - (tm->tm_mon < 2);
  int wday = (-473
	      + (365 * (tm->tm_year - 70))
	      + (corr_year / 4)
	      - ((corr_year / 4) / 25) + ((corr_year / 4) % 25 < 0)
	      + (((corr_year / 4) / 25) / 4)
	      + __mon_yday[0][tm->tm_mon]
	      + tm->tm_mday - 1);
  tm->tm_wday = ((wday % 7) + 7) % 7;
}

/* Compute the day of the year.
*/ static void day_of_the_year (struct tm *tm) { tm->tm_yday = (__mon_yday[__isleap (1900 + tm->tm_year)][tm->tm_mon] + (tm->tm_mday - 1)); } static char * #ifdef _LIBC internal_function #endif strptime_internal __P ((const char *rp, const char *fmt, struct tm *tm, enum locale_status *decided, int era_cnt)); static char * #ifdef _LIBC internal_function #endif strptime_internal (rp, fmt, tm, decided, era_cnt) const char *rp; const char *fmt; struct tm *tm; enum locale_status *decided; int era_cnt; { int cnt; size_t val; int have_I, is_pm; int century, want_century; int want_era; int have_wday, want_xday; int have_yday; int have_mon, have_mday; #ifdef _NL_CURRENT const char *rp_backup; size_t num_eras; struct era_entry *era; era = NULL; #endif have_I = is_pm = 0; century = -1; want_century = 0; want_era = 0; have_wday = want_xday = have_yday = have_mon = have_mday = 0; while (*fmt != '\0') { /* A white space in the format string matches 0 more or white space in the input string. */ if (isspace (*fmt)) { while (isspace (*rp)) ++rp; ++fmt; continue; } /* Any character but `%' must be matched by the same character in the iput string. */ if (*fmt != '%') { match_char (*fmt++, *rp++); continue; } ++fmt; #ifndef _NL_CURRENT /* We need this for handling the `E' modifier. */ start_over: #endif #ifdef _NL_CURRENT /* Make back up of current processing pointer. */ rp_backup = rp; #endif switch (*fmt++) { case '%': /* Match the `%' character itself. */ match_char ('%', *rp++); break; case 'a': case 'A': /* Match day of week. 
*/ for (cnt = 0; cnt < 7; ++cnt) { #ifdef _NL_CURRENT if (*decided !=raw) { if (match_string (_NL_CURRENT (LC_TIME, DAY_1 + cnt), rp)) { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, DAY_1 + cnt), weekday_name[cnt])) *decided = loc; break; } if (match_string (_NL_CURRENT (LC_TIME, ABDAY_1 + cnt), rp)) { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, ABDAY_1 + cnt), ab_weekday_name[cnt])) *decided = loc; break; } } #endif if (*decided != loc && (match_string (weekday_name[cnt], rp) || match_string (ab_weekday_name[cnt], rp))) { *decided = raw; break; } } if (cnt == 7) /* Does not match a weekday name. */ return NULL; tm->tm_wday = cnt; have_wday = 1; break; case 'b': case 'B': case 'h': /* Match month name. */ for (cnt = 0; cnt < 12; ++cnt) { #ifdef _NL_CURRENT if (*decided !=raw) { if (match_string (_NL_CURRENT (LC_TIME, MON_1 + cnt), rp)) { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, MON_1 + cnt), month_name[cnt])) *decided = loc; break; } if (match_string (_NL_CURRENT (LC_TIME, ABMON_1 + cnt), rp)) { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, ABMON_1 + cnt), ab_month_name[cnt])) *decided = loc; break; } } #endif if (match_string (month_name[cnt], rp) || match_string (ab_month_name[cnt], rp)) { *decided = raw; break; } } if (cnt == 12) /* Does not match a month name. */ return NULL; tm->tm_mon = cnt; want_xday = 1; break; case 'c': /* Match locale's date and time format. */ #ifdef _NL_CURRENT if (*decided != raw) { if (!recursive (_NL_CURRENT (LC_TIME, D_T_FMT))) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, D_T_FMT), HERE_D_T_FMT)) *decided = loc; want_xday = 1; break; } *decided = raw; } #endif if (!recursive (HERE_D_T_FMT)) return NULL; want_xday = 1; break; case 'C': /* Match century number. */ #ifdef _NL_CURRENT match_century: #endif get_number (0, 99, 2); century = val; want_xday = 1; break; case 'd': case 'e': /* Match day of month. 
*/ get_number (1, 31, 2); tm->tm_mday = val; have_mday = 1; want_xday = 1; break; case 'F': if (!recursive ("%Y-%m-%d")) return NULL; want_xday = 1; break; case 'x': #ifdef _NL_CURRENT if (*decided != raw) { if (!recursive (_NL_CURRENT (LC_TIME, D_FMT))) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, D_FMT), HERE_D_FMT)) *decided = loc; want_xday = 1; break; } *decided = raw; } #endif /* Fall through. */ case 'D': /* Match standard day format. */ if (!recursive (HERE_D_FMT)) return NULL; want_xday = 1; break; case 'k': case 'H': /* Match hour in 24-hour clock. */ get_number (0, 23, 2); tm->tm_hour = val; have_I = 0; break; case 'I': /* Match hour in 12-hour clock. */ get_number (1, 12, 2); tm->tm_hour = val % 12; have_I = 1; break; case 'j': /* Match day number of year. */ get_number (1, 366, 3); tm->tm_yday = val - 1; have_yday = 1; break; case 'm': /* Match number of month. */ get_number (1, 12, 2); tm->tm_mon = val - 1; have_mon = 1; want_xday = 1; break; case 'M': /* Match minute. */ get_number (0, 59, 2); tm->tm_min = val; break; case 'n': case 't': /* Match any white space. */ while (isspace (*rp)) ++rp; break; case 'p': /* Match locale's equivalent of AM/PM. 
*/ #ifdef _NL_CURRENT if (*decided != raw) { if (match_string (_NL_CURRENT (LC_TIME, AM_STR), rp)) { if (strcmp (_NL_CURRENT (LC_TIME, AM_STR), HERE_AM_STR)) *decided = loc; break; } if (match_string (_NL_CURRENT (LC_TIME, PM_STR), rp)) { if (strcmp (_NL_CURRENT (LC_TIME, PM_STR), HERE_PM_STR)) *decided = loc; is_pm = 1; break; } *decided = raw; } #endif if (!match_string (HERE_AM_STR, rp)) { if (match_string (HERE_PM_STR, rp)) { is_pm = 1; } else { return NULL; } } break; case 'r': #ifdef _NL_CURRENT if (*decided != raw) { if (!recursive (_NL_CURRENT (LC_TIME, T_FMT_AMPM))) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, T_FMT_AMPM), HERE_T_FMT_AMPM)) *decided = loc; break; } *decided = raw; } #endif if (!recursive (HERE_T_FMT_AMPM)) return NULL; break; case 'R': if (!recursive ("%H:%M")) return NULL; break; case 's': { /* The number of seconds may be very high so we cannot use the `get_number' macro. Instead read the number character for character and construct the result while doing this. */ time_t secs = 0; if (*rp < '0' || *rp > '9') /* We need at least one digit. */ return NULL; do { secs *= 10; secs += *rp++ - '0'; } while (*rp >= '0' && *rp <= '9'); if (localtime_r (&secs, tm) == NULL) /* Error in function. */ return NULL; } break; case 'S': get_number (0, 61, 2); tm->tm_sec = val; break; case 'X': #ifdef _NL_CURRENT if (*decided != raw) { if (!recursive (_NL_CURRENT (LC_TIME, T_FMT))) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (strcmp (_NL_CURRENT (LC_TIME, T_FMT), HERE_T_FMT)) *decided = loc; break; } *decided = raw; } #endif /* Fall through. */ case 'T': if (!recursive (HERE_T_FMT)) return NULL; break; case 'u': get_number (1, 7, 1); tm->tm_wday = val % 7; have_wday = 1; break; case 'g': get_number (0, 99, 2); /* XXX This cannot determine any field in TM. 
*/ break; case 'G': if (*rp < '0' || *rp > '9') return NULL; /* XXX Ignore the number since we would need some more information to compute a real date. */ do ++rp; while (*rp >= '0' && *rp <= '9'); break; case 'U': case 'V': case 'W': get_number (0, 53, 2); /* XXX This cannot determine any field in TM without some information. */ break; case 'w': /* Match number of weekday. */ get_number (0, 6, 1); tm->tm_wday = val; have_wday = 1; break; case 'y': #ifdef _NL_CURRENT match_year_in_century: #endif /* Match year within century. */ get_number (0, 99, 2); /* The "Year 2000: The Millennium Rollover" paper suggests that values in the range 69-99 refer to the twentieth century. */ tm->tm_year = val >= 69 ? val : val + 100; /* Indicate that we want to use the century, if specified. */ want_century = 1; want_xday = 1; break; case 'Y': /* Match year including century number. */ get_number (0, 9999, 4); tm->tm_year = val - 1900; want_century = 0; want_xday = 1; break; case 'Z': /* XXX How to handle this? */ break; case 'E': #ifdef _NL_CURRENT switch (*fmt++) { case 'c': /* Match locale's alternate date and time format. */ if (*decided != raw) { const char *fmt = _NL_CURRENT (LC_TIME, ERA_D_T_FMT); if (*fmt == '\0') fmt = _NL_CURRENT (LC_TIME, D_T_FMT); if (!recursive (fmt)) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (strcmp (fmt, HERE_D_T_FMT)) *decided = loc; want_xday = 1; break; } *decided = raw; } /* The C locale has no era information, so use the normal representation. 
*/ if (!recursive (HERE_D_T_FMT)) return NULL; want_xday = 1; break; case 'C': if (*decided != raw) { if (era_cnt >= 0) { era = _nl_select_era_entry (era_cnt); if (match_string (era->era_name, rp)) { *decided = loc; break; } else return NULL; } else { num_eras = _NL_CURRENT_WORD (LC_TIME, _NL_TIME_ERA_NUM_ENTRIES); for (era_cnt = 0; era_cnt < (int) num_eras; ++era_cnt, rp = rp_backup) { era = _nl_select_era_entry (era_cnt); if (match_string (era->era_name, rp)) { *decided = loc; break; } } if (era_cnt == (int) num_eras) { era_cnt = -1; if (*decided == loc) return NULL; } else break; } *decided = raw; } /* The C locale has no era information, so use the normal representation. */ goto match_century; case 'y': if (*decided == raw) goto match_year_in_century; get_number(0, 9999, 4); tm->tm_year = val; want_era = 1; want_xday = 1; break; case 'Y': if (*decided != raw) { num_eras = _NL_CURRENT_WORD (LC_TIME, _NL_TIME_ERA_NUM_ENTRIES); for (era_cnt = 0; era_cnt < (int) num_eras; ++era_cnt, rp = rp_backup) { era = _nl_select_era_entry (era_cnt); if (recursive (era->era_format)) break; } if (era_cnt == (int) num_eras) { era_cnt = -1; if (*decided == loc) return NULL; else rp = rp_backup; } else { *decided = loc; era_cnt = -1; break; } *decided = raw; } get_number (0, 9999, 4); tm->tm_year = val - 1900; want_century = 0; want_xday = 1; break; case 'x': if (*decided != raw) { const char *fmt = _NL_CURRENT (LC_TIME, ERA_D_FMT); if (*fmt == '\0') fmt = _NL_CURRENT (LC_TIME, D_FMT); if (!recursive (fmt)) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (strcmp (fmt, HERE_D_FMT)) *decided = loc; break; } *decided = raw; } if (!recursive (HERE_D_FMT)) return NULL; break; case 'X': if (*decided != raw) { const char *fmt = _NL_CURRENT (LC_TIME, ERA_T_FMT); if (*fmt == '\0') fmt = _NL_CURRENT (LC_TIME, T_FMT); if (!recursive (fmt)) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (strcmp (fmt, HERE_T_FMT)) *decided = loc; break; } *decided = 
raw; } if (!recursive (HERE_T_FMT)) return NULL; break; default: return NULL; } break; #else /* We have no information about the era format. Just use the normal format. */ if (*fmt != 'c' && *fmt != 'C' && *fmt != 'y' && *fmt != 'Y' && *fmt != 'x' && *fmt != 'X') /* This is an illegal format. */ return NULL; goto start_over; #endif case 'O': switch (*fmt++) { case 'd': case 'e': /* Match day of month using alternate numeric symbols. */ get_alt_number (1, 31, 2); tm->tm_mday = val; have_mday = 1; want_xday = 1; break; case 'H': /* Match hour in 24-hour clock using alternate numeric symbols. */ get_alt_number (0, 23, 2); tm->tm_hour = val; have_I = 0; break; case 'I': /* Match hour in 12-hour clock using alternate numeric symbols. */ get_alt_number (1, 12, 2); tm->tm_hour = val - 1; have_I = 1; break; case 'm': /* Match month using alternate numeric symbols. */ get_alt_number (1, 12, 2); tm->tm_mon = val - 1; have_mon = 1; want_xday = 1; break; case 'M': /* Match minutes using alternate numeric symbols. */ get_alt_number (0, 59, 2); tm->tm_min = val; break; case 'S': /* Match seconds using alternate numeric symbols. */ get_alt_number (0, 61, 2); tm->tm_sec = val; break; case 'U': case 'V': case 'W': get_alt_number (0, 53, 2); /* XXX This cannot determine any field in TM without further information. */ break; case 'w': /* Match number of weekday using alternate numeric symbols. */ get_alt_number (0, 6, 1); tm->tm_wday = val; have_wday = 1; break; case 'y': /* Match year within century using alternate numeric symbols. */ get_alt_number (0, 99, 2); tm->tm_year = val >= 69 ? val : val + 100; want_xday = 1; break; default: return NULL; } break; default: return NULL; } } if (have_I && is_pm) tm->tm_hour += 12; if (century != -1) { if (want_century) tm->tm_year = tm->tm_year % 100 + (century - 19) * 100; else /* Only the century, but not the year. Strange, but so be it. 
*/ tm->tm_year = (century - 19) * 100; } #ifdef _NL_CURRENT if (era_cnt != -1) { era = _nl_select_era_entry(era_cnt); if (want_era) tm->tm_year = (era->start_date[0] + ((tm->tm_year - era->offset) * era->absolute_direction)); else /* Era start year assumed. */ tm->tm_year = era->start_date[0]; } else #endif if (want_era) return NULL; if (want_xday && !have_wday) { if ( !(have_mon && have_mday) && have_yday) { /* We don't have tm_mon and/or tm_mday, compute them. */ int t_mon = 0; while (__mon_yday[__isleap(1900 + tm->tm_year)][t_mon] <= tm->tm_yday) t_mon++; if (!have_mon) tm->tm_mon = t_mon - 1; if (!have_mday) tm->tm_mday = (tm->tm_yday - __mon_yday[__isleap(1900 + tm->tm_year)][t_mon - 1] + 1); } day_of_the_week (tm); } if (want_xday && !have_yday) day_of_the_year (tm); return discard_const_p(char, rp); } char *rep_strptime(const char *buf, const char *format, struct tm *tm) { enum locale_status decided; #ifdef _NL_CURRENT decided = not; #else decided = raw; #endif return strptime_internal (buf, format, tm, &decided, -1); } ntdb-1.0/lib/replace/system/000077500000000000000000000000001224151530700160065ustar00rootroot00000000000000ntdb-1.0/lib/replace/system/README000066400000000000000000000003621224151530700166670ustar00rootroot00000000000000This directory contains wrappers around logical groups of system include files. The idea is to avoid #ifdef blocks in the main code, and instead put all the necessary conditional includes in subsystem specific header files in this directory. ntdb-1.0/lib/replace/system/aio.h000066400000000000000000000020141224151530700167240ustar00rootroot00000000000000#ifndef _system_aio_h #define _system_aio_h /* Unix SMB/CIFS implementation. AIO system include wrappers Copyright (C) Andrew Tridgell 2006 ** NOTE! The following LGPL license applies to the replace ** library. 
This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_LIBAIO_H #include #endif #endif ntdb-1.0/lib/replace/system/capability.h000066400000000000000000000031601224151530700203000ustar00rootroot00000000000000#ifndef _system_capability_h #define _system_capability_h /* Unix SMB/CIFS implementation. capability system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #ifdef HAVE_SYS_CAPABILITY_H #if defined(BROKEN_REDHAT_7_SYSTEM_HEADERS) && !defined(_I386_STATFS_H) && !defined(_PPC_STATFS_H) #define _I386_STATFS_H #define _PPC_STATFS_H #define BROKEN_REDHAT_7_STATFS_WORKAROUND #endif #if defined(BROKEN_RHEL5_SYS_CAP_HEADER) && !defined(_LINUX_TYPES_H) #define BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND #endif #include #ifdef BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND #undef _LINUX_TYPES_H #undef BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND #endif #ifdef BROKEN_REDHAT_7_STATFS_WORKAROUND #undef _PPC_STATFS_H #undef _I386_STATFS_H #undef BROKEN_REDHAT_7_STATFS_WORKAROUND #endif #endif #endif ntdb-1.0/lib/replace/system/dir.h000066400000000000000000000034401224151530700167360ustar00rootroot00000000000000#ifndef _system_dir_h #define _system_dir_h /* Unix SMB/CIFS implementation. directory system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #if HAVE_DIRENT_H # include # define NAMLEN(dirent) strlen((dirent)->d_name) #else # define dirent direct # define NAMLEN(dirent) (dirent)->d_namlen # if HAVE_SYS_NDIR_H # include # endif # if HAVE_SYS_DIR_H # include # endif # if HAVE_NDIR_H # include # endif #endif #ifndef HAVE_MKDIR_MODE #define mkdir(dir, mode) mkdir(dir) #endif /* Test whether a file name is the "." 
or ".." directory entries. * These really should be inline functions. */ #ifndef ISDOT #define ISDOT(path) ( \ *((const char *)(path)) == '.' && \ *(((const char *)(path)) + 1) == '\0' \ ) #endif #ifndef ISDOTDOT #define ISDOTDOT(path) ( \ *((const char *)(path)) == '.' && \ *(((const char *)(path)) + 1) == '.' && \ *(((const char *)(path)) + 2) == '\0' \ ) #endif #endif ntdb-1.0/lib/replace/system/filesys.h000066400000000000000000000153041224151530700176400ustar00rootroot00000000000000#ifndef _system_filesys_h #define _system_filesys_h /* Unix SMB/CIFS implementation. filesystem system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include #include #ifdef HAVE_SYS_PARAM_H #include #endif #ifdef HAVE_SYS_MOUNT_H #include #endif #ifdef HAVE_MNTENT_H #include #endif #ifdef HAVE_SYS_VFS_H #include #endif #ifdef HAVE_SYS_ACL_H #include #endif #ifdef HAVE_ACL_LIBACL_H #include #endif #ifdef HAVE_SYS_FS_S5PARAM_H #include #endif #if defined (HAVE_SYS_FILSYS_H) && !defined (_CRAY) #include #endif #ifdef HAVE_SYS_STATFS_H # include #endif #ifdef HAVE_DUSTAT_H #include #endif #ifdef HAVE_SYS_STATVFS_H #include #endif #ifdef HAVE_SYS_FILIO_H #include #endif #ifdef HAVE_SYS_FILE_H #include #endif #ifdef HAVE_FCNTL_H #include #else #ifdef HAVE_SYS_FCNTL_H #include #endif #endif #ifdef HAVE_SYS_MODE_H /* apparently AIX needs this for S_ISLNK */ #ifndef S_ISLNK #include #endif #endif #ifdef HAVE_SYS_IOCTL_H #include #endif #ifdef HAVE_SYS_UIO_H #include #endif /* * Veritas File System. Often in addition to native. * Quotas different. */ #if defined(HAVE_SYS_FS_VX_QUOTA_H) #define VXFS_QUOTA #endif #if HAVE_SYS_ATTRIBUTES_H #include #elif HAVE_ATTR_ATTRIBUTES_H #include #endif /* mutually exclusive (SuSE 8.2) */ #if HAVE_ATTR_XATTR_H #include #elif HAVE_SYS_XATTR_H #include #endif #ifdef HAVE_SYS_EA_H #include #endif #ifdef HAVE_SYS_EXTATTR_H #include #endif #ifdef HAVE_SYS_RESOURCE_H #include #endif #ifndef XATTR_CREATE #define XATTR_CREATE 0x1 /* set value, fail if attr already exists */ #endif #ifndef XATTR_REPLACE #define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */ #endif /* Some POSIX definitions for those without */ #ifndef S_IFDIR #define S_IFDIR 0x4000 #endif #ifndef S_ISDIR #define S_ISDIR(mode) ((mode & 0xF000) == S_IFDIR) #endif #ifndef S_IRWXU #define S_IRWXU 00700 /* read, write, execute: owner */ #endif #ifndef S_IRUSR #define S_IRUSR 00400 /* read permission: owner */ #endif #ifndef S_IWUSR #define S_IWUSR 00200 /* write permission: owner */ #endif #ifndef S_IXUSR #define S_IXUSR 00100 /* execute permission: owner */ #endif #ifndef S_IRWXG #define S_IRWXG 00070 /* read, 
write, execute: group */ #endif #ifndef S_IRGRP #define S_IRGRP 00040 /* read permission: group */ #endif #ifndef S_IWGRP #define S_IWGRP 00020 /* write permission: group */ #endif #ifndef S_IXGRP #define S_IXGRP 00010 /* execute permission: group */ #endif #ifndef S_IRWXO #define S_IRWXO 00007 /* read, write, execute: other */ #endif #ifndef S_IROTH #define S_IROTH 00004 /* read permission: other */ #endif #ifndef S_IWOTH #define S_IWOTH 00002 /* write permission: other */ #endif #ifndef S_IXOTH #define S_IXOTH 00001 /* execute permission: other */ #endif #ifndef O_ACCMODE #define O_ACCMODE (O_RDONLY | O_WRONLY | O_RDWR) #endif #ifndef MAXPATHLEN #define MAXPATHLEN 256 #endif #ifndef SEEK_SET #define SEEK_SET 0 #endif #ifdef _WIN32 #define mkdir(d,m) _mkdir(d) #endif #ifdef UID_WRAPPER # ifndef UID_WRAPPER_DISABLE # ifndef UID_WRAPPER_NOT_REPLACE # define UID_WRAPPER_REPLACE # endif /* UID_WRAPPER_NOT_REPLACE */ # include "../uid_wrapper/uid_wrapper.h" # endif /* UID_WRAPPER_DISABLE */ #else /* UID_WRAPPER */ # define uwrap_enabled() 0 #endif /* UID_WRAPPER */ /* this allows us to use a uniform error handling for our xattr wrappers */ #ifndef ENOATTR #define ENOATTR ENODATA #endif #if !defined(HAVE_GETXATTR) || defined(XATTR_ADDITIONAL_OPTIONS) ssize_t rep_getxattr (const char *path, const char *name, void *value, size_t size); #define getxattr(path, name, value, size) rep_getxattr(path, name, value, size) /* define is in "replace.h" */ #endif #if !defined(HAVE_FGETXATTR) || defined(XATTR_ADDITIONAL_OPTIONS) ssize_t rep_fgetxattr (int filedes, const char *name, void *value, size_t size); #define fgetxattr(filedes, name, value, size) rep_fgetxattr(filedes, name, value, size) /* define is in "replace.h" */ #endif #if !defined(HAVE_LISTXATTR) || defined(XATTR_ADDITIONAL_OPTIONS) ssize_t rep_listxattr (const char *path, char *list, size_t size); #define listxattr(path, list, size) rep_listxattr(path, list, size) /* define is in "replace.h" */ #endif #if 
!defined(HAVE_FLISTXATTR) || defined(XATTR_ADDITIONAL_OPTIONS) ssize_t rep_flistxattr (int filedes, char *list, size_t size); #define flistxattr(filedes, value, size) rep_flistxattr(filedes, value, size) /* define is in "replace.h" */ #endif #if !defined(HAVE_REMOVEXATTR) || defined(XATTR_ADDITIONAL_OPTIONS) int rep_removexattr (const char *path, const char *name); #define removexattr(path, name) rep_removexattr(path, name) /* define is in "replace.h" */ #endif #if !defined(HAVE_FREMOVEXATTR) || defined(XATTR_ADDITIONAL_OPTIONS) int rep_fremovexattr (int filedes, const char *name); #define fremovexattr(filedes, name) rep_fremovexattr(filedes, name) /* define is in "replace.h" */ #endif #if !defined(HAVE_SETXATTR) || defined(XATTR_ADDITIONAL_OPTIONS) int rep_setxattr (const char *path, const char *name, const void *value, size_t size, int flags); #define setxattr(path, name, value, size, flags) rep_setxattr(path, name, value, size, flags) /* define is in "replace.h" */ #endif #if !defined(HAVE_FSETXATTR) || defined(XATTR_ADDITIONAL_OPTIONS) int rep_fsetxattr (int filedes, const char *name, const void *value, size_t size, int flags); #define fsetxattr(filedes, name, value, size, flags) rep_fsetxattr(filedes, name, value, size, flags) /* define is in "replace.h" */ #endif #endif ntdb-1.0/lib/replace/system/glob.h000066400000000000000000000020771224151530700171100ustar00rootroot00000000000000#ifndef _system_glob_h #define _system_glob_h /* Unix SMB/CIFS implementation. glob system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_GLOB_H #include #endif #ifdef HAVE_FNMATCH_H #include #endif #endif ntdb-1.0/lib/replace/system/gssapi.h000066400000000000000000000026531224151530700174530ustar00rootroot00000000000000#ifndef _system_gssapi_h #define _system_gssapi_h /* Unix SMB/CIFS implementation. GSSAPI system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_GSSAPI #ifdef HAVE_GSSAPI_GSSAPI_EXT_H #include #elif HAVE_GSSAPI_GSSAPI_H #include #elif HAVE_GSSAPI_GSSAPI_GENERIC_H #include #elif HAVE_GSSAPI_H #include #endif #if HAVE_GSSAPI_GSSAPI_KRB5_H #include #endif #if HAVE_GSSAPI_GSSAPI_SPNEGO_H #include #elif HAVE_GSSAPI_SPNEGO_H #include #endif #endif #endif ntdb-1.0/lib/replace/system/iconv.h000066400000000000000000000030461224151530700173000ustar00rootroot00000000000000#ifndef _system_iconv_h #define _system_iconv_h /* Unix SMB/CIFS implementation. 
iconv memory system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #if !defined(HAVE_ICONV) && defined(HAVE_ICONV_H) #define HAVE_ICONV #endif #if !defined(HAVE_GICONV) && defined(HAVE_GICONV_H) #define HAVE_GICONV #endif #if !defined(HAVE_BICONV) && defined(HAVE_BICONV_H) #define HAVE_BICONV #endif #ifdef HAVE_NATIVE_ICONV #if defined(HAVE_ICONV) #include #elif defined(HAVE_GICONV) #include #elif defined(HAVE_BICONV) #include #endif #endif /* HAVE_NATIVE_ICONV */ /* needed for some systems without iconv. Doesn't really matter what error code we use */ #ifndef EILSEQ #define EILSEQ EIO #endif #endif ntdb-1.0/lib/replace/system/kerberos.h000066400000000000000000000021311224151530700177700ustar00rootroot00000000000000#ifndef _system_kerberos_h #define _system_kerberos_h /* Unix SMB/CIFS implementation. kerberos system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. 
This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_KRB5 #if HAVE_KRB5_H #include #endif #if HAVE_COM_ERR_H #include #endif #endif #endif ntdb-1.0/lib/replace/system/locale.h000066400000000000000000000021641224151530700174210ustar00rootroot00000000000000#ifndef _system_locale_h #define _system_locale_h /* Unix SMB/CIFS implementation. locale include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #ifdef HAVE_CTYPE_H #include #endif #ifdef HAVE_LOCALE_H #include #endif #ifdef HAVE_LANGINFO_H #include #endif #endif ntdb-1.0/lib/replace/system/network.h000066400000000000000000000202511224151530700176500ustar00rootroot00000000000000#ifndef _system_network_h #define _system_network_h /* Unix SMB/CIFS implementation. networking system include wrappers Copyright (C) Andrew Tridgell 2004 Copyright (C) Jelmer Vernooij 2007 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifndef LIBREPLACE_NETWORK_CHECKS #error "AC_LIBREPLACE_NETWORK_CHECKS missing in configure" #endif #include #ifdef HAVE_SYS_SOCKET_H #include #endif #ifdef HAVE_UNIXSOCKET #include #endif #ifdef HAVE_NETINET_IN_H #include #endif #ifdef HAVE_ARPA_INET_H #include #endif #ifdef HAVE_NETDB_H #include #endif #ifdef HAVE_NETINET_TCP_H #include #endif /* * The next three defines are needed to access the IPTOS_* options * on some systems. 
*/ #ifdef HAVE_NETINET_IN_SYSTM_H #include #endif #ifdef HAVE_NETINET_IN_IP_H #include #endif #ifdef HAVE_NETINET_IP_H #include #endif #ifdef HAVE_NET_IF_H #include #endif #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_SYS_IOCTL_H #include #endif #ifdef HAVE_SYS_UIO_H #include #endif #ifdef HAVE_STROPTS_H #include #endif #ifndef HAVE_SOCKLEN_T #define HAVE_SOCKLEN_T typedef int socklen_t; #endif #if !defined (HAVE_INET_NTOA) || defined(REPLACE_INET_NTOA) /* define is in "replace.h" */ char *rep_inet_ntoa(struct in_addr ip); #endif #ifndef HAVE_INET_PTON /* define is in "replace.h" */ int rep_inet_pton(int af, const char *src, void *dst); #endif #ifndef HAVE_INET_NTOP /* define is in "replace.h" */ const char *rep_inet_ntop(int af, const void *src, char *dst, socklen_t size); #endif #ifndef HAVE_INET_ATON /* define is in "replace.h" */ int rep_inet_aton(const char *src, struct in_addr *dst); #endif #ifndef HAVE_CONNECT /* define is in "replace.h" */ int rep_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen); #endif #ifndef HAVE_GETHOSTBYNAME /* define is in "replace.h" */ struct hostent *rep_gethostbyname(const char *name); #endif #ifdef HAVE_IFADDRS_H #include #endif #ifndef HAVE_STRUCT_IFADDRS struct ifaddrs { struct ifaddrs *ifa_next; /* Pointer to next struct */ char *ifa_name; /* Interface name */ unsigned int ifa_flags; /* Interface flags */ struct sockaddr *ifa_addr; /* Interface address */ struct sockaddr *ifa_netmask; /* Interface netmask */ #undef ifa_dstaddr struct sockaddr *ifa_dstaddr; /* P2P interface destination */ void *ifa_data; /* Address specific data */ }; #endif #ifndef HAVE_GETIFADDRS int rep_getifaddrs(struct ifaddrs **); #endif #ifndef HAVE_FREEIFADDRS void rep_freeifaddrs(struct ifaddrs *); #endif #ifndef HAVE_SOCKETPAIR /* define is in "replace.h" */ int rep_socketpair(int d, int type, int protocol, int sv[2]); #endif /* * Some systems have getaddrinfo but not the * defines needed to use it. 
*/ /* Various macros that ought to be in , but might not be */ #ifndef EAI_FAIL #define EAI_BADFLAGS (-1) #define EAI_NONAME (-2) #define EAI_AGAIN (-3) #define EAI_FAIL (-4) #define EAI_FAMILY (-6) #define EAI_SOCKTYPE (-7) #define EAI_SERVICE (-8) #define EAI_MEMORY (-10) #define EAI_SYSTEM (-11) #endif /* !EAI_FAIL */ #ifndef AI_PASSIVE #define AI_PASSIVE 0x0001 #endif #ifndef AI_CANONNAME #define AI_CANONNAME 0x0002 #endif #ifndef AI_NUMERICHOST /* * some platforms don't support AI_NUMERICHOST; define as zero if using * the system version of getaddrinfo... */ #if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO) #define AI_NUMERICHOST 0 #else #define AI_NUMERICHOST 0x0004 #endif #endif /* * Some of the functions in source3/lib/util_sock.c use AI_ADDRCONFIG. On QNX * 6.3.0, this macro is defined but, if it's used, getaddrinfo will fail. This * prevents smbd from opening any sockets. * * If I undefine AI_ADDRCONFIG on such systems and define it to be 0, * this works around the issue. */ #ifdef __QNX__ #include #if _NTO_VERSION == 630 #undef AI_ADDRCONFIG #endif #endif #ifndef AI_ADDRCONFIG /* * logic copied from AI_NUMERICHOST */ #if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO) #define AI_ADDRCONFIG 0 #else #define AI_ADDRCONFIG 0x0020 #endif #endif #ifndef AI_NUMERICSERV /* * logic copied from AI_NUMERICHOST */ #if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO) #define AI_NUMERICSERV 0 #else #define AI_NUMERICSERV 0x0400 #endif #endif #ifndef NI_NUMERICHOST #define NI_NUMERICHOST 1 #endif #ifndef NI_NUMERICSERV #define NI_NUMERICSERV 2 #endif #ifndef NI_NOFQDN #define NI_NOFQDN 4 #endif #ifndef NI_NAMEREQD #define NI_NAMEREQD 8 #endif #ifndef NI_DGRAM #define NI_DGRAM 16 #endif #ifndef NI_MAXHOST #define NI_MAXHOST 1025 #endif #ifndef NI_MAXSERV #define NI_MAXSERV 32 #endif /* * glibc on linux doesn't seem to have MSG_WAITALL * defined. I think the kernel has it though.. 
*/ #ifndef MSG_WAITALL #define MSG_WAITALL 0 #endif #ifndef INADDR_LOOPBACK #define INADDR_LOOPBACK 0x7f000001 #endif #ifndef INADDR_NONE #define INADDR_NONE 0xffffffff #endif #ifndef EAFNOSUPPORT #define EAFNOSUPPORT EINVAL #endif #ifndef INET_ADDRSTRLEN #define INET_ADDRSTRLEN 16 #endif #ifndef INET6_ADDRSTRLEN #define INET6_ADDRSTRLEN 46 #endif #ifndef HOST_NAME_MAX #define HOST_NAME_MAX 255 #endif #ifndef MAXHOSTNAMELEN #define MAXHOSTNAMELEN HOST_NAME_MAX #endif #ifndef HAVE_SA_FAMILY_T #define HAVE_SA_FAMILY_T typedef unsigned short int sa_family_t; #endif #ifndef HAVE_STRUCT_SOCKADDR_STORAGE #define HAVE_STRUCT_SOCKADDR_STORAGE #ifdef HAVE_STRUCT_SOCKADDR_IN6 #define sockaddr_storage sockaddr_in6 #define ss_family sin6_family #define HAVE_SS_FAMILY 1 #else /*HAVE_STRUCT_SOCKADDR_IN6*/ #define sockaddr_storage sockaddr_in #define ss_family sin_family #define HAVE_SS_FAMILY 1 #endif /*HAVE_STRUCT_SOCKADDR_IN6*/ #endif /*HAVE_STRUCT_SOCKADDR_STORAGE*/ #ifndef HAVE_SS_FAMILY #ifdef HAVE___SS_FAMILY #define ss_family __ss_family #define HAVE_SS_FAMILY 1 #endif #endif #ifndef IOV_MAX # ifdef UIO_MAXIOV # define IOV_MAX UIO_MAXIOV # else # ifdef __sgi /* * IRIX 6.5 has sysconf(_SC_IOV_MAX) * which might return 512 or bigger */ # define IOV_MAX 512 # endif # endif #endif #ifndef HAVE_STRUCT_ADDRINFO #define HAVE_STRUCT_ADDRINFO struct addrinfo { int ai_flags; int ai_family; int ai_socktype; int ai_protocol; socklen_t ai_addrlen; struct sockaddr *ai_addr; char *ai_canonname; struct addrinfo *ai_next; }; #endif /* HAVE_STRUCT_ADDRINFO */ #if !defined(HAVE_GETADDRINFO) #include "getaddrinfo.h" #endif /* Needed for some systems that don't define it (Solaris). 
*/ #ifndef ifr_netmask #define ifr_netmask ifr_addr #endif /* Some old Linux systems have broken header files */ #ifdef HAVE_IPV6 #ifdef HAVE_LINUX_IPV6_V6ONLY_26 #define IPV6_V6ONLY 26 #endif /* HAVE_LINUX_IPV6_V6ONLY_26 */ #endif /* HAVE_IPV6 */ #ifdef SOCKET_WRAPPER #ifndef SOCKET_WRAPPER_DISABLE #ifndef SOCKET_WRAPPER_NOT_REPLACE #define SOCKET_WRAPPER_REPLACE #endif /* SOCKET_WRAPPER_NOT_REPLACE */ #include "../socket_wrapper/socket_wrapper.h" #endif /* SOCKET_WRAPPER_DISABLE */ #endif /* SOCKET_WRAPPER */ #ifdef UID_WRAPPER # ifndef UID_WRAPPER_DISABLE # ifndef UID_WRAPPER_NOT_REPLACE # define UID_WRAPPER_REPLACE # endif /* UID_WRAPPER_NOT_REPLACE */ # include "../uid_wrapper/uid_wrapper.h" # endif /* UID_WRAPPER_DISABLE */ #else /* UID_WRAPPER */ # define uwrap_enabled() 0 #endif /* UID_WRAPPER */ #endif ntdb-1.0/lib/replace/system/passwd.h000066400000000000000000000053171224151530700174660ustar00rootroot00000000000000#ifndef _system_passwd_h #define _system_passwd_h /* Unix SMB/CIFS implementation. passwd system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ /* this needs to be included before nss_wrapper.h on some systems */ #include #ifdef HAVE_PWD_H #include #endif #ifdef HAVE_GRP_H #include #endif #ifdef HAVE_SYS_PRIV_H #include #endif #ifdef HAVE_SYS_ID_H #include #endif #ifdef HAVE_CRYPT_H #include #endif #ifdef HAVE_SHADOW_H #include #endif #ifdef HAVE_SYS_SECURITY_H #include #include #define PASSWORD_LENGTH 16 #endif /* HAVE_SYS_SECURITY_H */ #ifdef HAVE_GETPWANAM #include #include #include #endif #ifdef HAVE_COMPAT_H #include #endif #ifndef NGROUPS_MAX #define NGROUPS_MAX 32 /* Guess... */ #endif /* what is the longest significant password available on your system? Knowing this speeds up password searches a lot */ #ifndef PASSWORD_LENGTH #define PASSWORD_LENGTH 8 #endif #if defined(HAVE_PUTPRPWNAM) && defined(AUTH_CLEARTEXT_SEG_CHARS) #define OSF1_ENH_SEC 1 #endif #ifndef ALLOW_CHANGE_PASSWORD #if (defined(HAVE_TERMIOS_H) && defined(HAVE_DUP2) && defined(HAVE_SETSID)) #define ALLOW_CHANGE_PASSWORD 1 #endif #endif #if defined(HAVE_CRYPT16) && defined(HAVE_GETAUTHUID) #define ULTRIX_AUTH 1 #endif #ifdef NSS_WRAPPER #ifndef NSS_WRAPPER_DISABLE #ifndef NSS_WRAPPER_NOT_REPLACE #define NSS_WRAPPER_REPLACE #endif /* NSS_WRAPPER_NOT_REPLACE */ #include "../nss_wrapper/nss_wrapper.h" #endif /* NSS_WRAPPER_DISABLE */ #endif /* NSS_WRAPPER */ #ifdef UID_WRAPPER # ifndef UID_WRAPPER_DISABLE # ifndef UID_WRAPPER_NOT_REPLACE # define UID_WRAPPER_REPLACE # endif /* UID_WRAPPER_NOT_REPLACE */ # include "../uid_wrapper/uid_wrapper.h" # endif /* UID_WRAPPER_DISABLE */ #else /* UID_WRAPPER */ # define uwrap_enabled() 0 #endif /* UID_WRAPPER */ #endif ntdb-1.0/lib/replace/system/readline.h000066400000000000000000000032131224151530700177410ustar00rootroot00000000000000#ifndef _system_readline_h #define _system_readline_h /* Unix SMB/CIFS implementation. Readline wrappers ** NOTE! The following LGPL license applies to the replace ** library. 
This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_LIBREADLINE # ifdef HAVE_READLINE_READLINE_H # include # ifdef HAVE_READLINE_HISTORY_H # include # endif # else # ifdef HAVE_READLINE_H # include # ifdef HAVE_HISTORY_H # include # endif # else # undef HAVE_LIBREADLINE # endif # endif #endif #ifdef HAVE_NEW_LIBREADLINE #ifdef HAVE_CPPFUNCTION # define RL_COMPLETION_CAST (CPPFunction *) #elif HAVE_RL_COMPLETION_T # define RL_COMPLETION_CAST (rl_completion_t *) #else # define RL_COMPLETION_CAST #endif #else /* This type is missing from libreadline<4.0 (approximately) */ # define RL_COMPLETION_CAST #endif /* HAVE_NEW_LIBREADLINE */ #endif ntdb-1.0/lib/replace/system/select.h000066400000000000000000000045361224151530700174460ustar00rootroot00000000000000#ifndef _system_select_h #define _system_select_h /* Unix SMB/CIFS implementation. select system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_SYS_SELECT_H #include #endif #ifdef HAVE_SYS_EPOLL_H #include #endif #ifndef SELECT_CAST #define SELECT_CAST #endif #ifdef HAVE_POLL #include #else /* Type used for the number of file descriptors. */ typedef unsigned long int nfds_t; /* Data structure describing a polling request. */ struct pollfd { int fd; /* File descriptor to poll. */ short int events; /* Types of events poller cares about. */ short int revents; /* Types of events that actually occurred. */ }; /* Event types that can be polled for. These bits may be set in `events' to indicate the interesting event types; they will appear in `revents' to indicate the status of the file descriptor. */ #define POLLIN 0x001 /* There is data to read. */ #define POLLPRI 0x002 /* There is urgent data to read. */ #define POLLOUT 0x004 /* Writing now will not block. */ #define POLLRDNORM 0x040 /* Normal data may be read. */ #define POLLRDBAND 0x080 /* Priority data may be read. */ #define POLLWRNORM 0x100 /* Writing now will not block. */ #define POLLWRBAND 0x200 /* Priority data may be written. */ #define POLLERR 0x008 /* Error condition. */ #define POLLHUP 0x010 /* Hung up. */ #define POLLNVAL 0x020 /* Invalid polling request. */ /* define is in "replace.h" */ int rep_poll(struct pollfd *fds, nfds_t nfds, int timeout); #endif #endif ntdb-1.0/lib/replace/system/shmem.h000066400000000000000000000026251224151530700172750ustar00rootroot00000000000000#ifndef _system_shmem_h #define _system_shmem_h /* Unix SMB/CIFS implementation. shared memory system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! 
The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #if defined(HAVE_SYS_IPC_H) #include #endif /* HAVE_SYS_IPC_H */ #if defined(HAVE_SYS_SHM_H) #include #endif /* HAVE_SYS_SHM_H */ #ifdef HAVE_SYS_MMAN_H #include #endif /* NetBSD doesn't have these */ #ifndef SHM_R #define SHM_R 0400 #endif #ifndef SHM_W #define SHM_W 0200 #endif #ifndef MAP_FILE #define MAP_FILE 0 #endif #ifndef MAP_FAILED #define MAP_FAILED ((void *)-1) #endif #endif ntdb-1.0/lib/replace/system/syslog.h000066400000000000000000000034351224151530700175040ustar00rootroot00000000000000#ifndef _system_syslog_h #define _system_syslog_h /* Unix SMB/CIFS implementation. syslog system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_SYSLOG_H #include #else #ifdef HAVE_SYS_SYSLOG_H #include #endif #endif /* For sys_adminlog(). */ #ifndef LOG_EMERG #define LOG_EMERG 0 /* system is unusable */ #endif #ifndef LOG_ALERT #define LOG_ALERT 1 /* action must be taken immediately */ #endif #ifndef LOG_CRIT #define LOG_CRIT 2 /* critical conditions */ #endif #ifndef LOG_ERR #define LOG_ERR 3 /* error conditions */ #endif #ifndef LOG_WARNING #define LOG_WARNING 4 /* warning conditions */ #endif #ifndef LOG_NOTICE #define LOG_NOTICE 5 /* normal but significant condition */ #endif #ifndef LOG_INFO #define LOG_INFO 6 /* informational */ #endif #ifndef LOG_DEBUG #define LOG_DEBUG 7 /* debug-level messages */ #endif #endif ntdb-1.0/lib/replace/system/terminal.h000066400000000000000000000026251224151530700177770ustar00rootroot00000000000000#ifndef _system_terminal_h #define _system_terminal_h /* Unix SMB/CIFS implementation. terminal system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #ifdef SUNOS4 /* on SUNOS4 termios.h conflicts with sys/ioctl.h */ #undef HAVE_TERMIOS_H #endif #if defined(HAVE_TERMIOS_H) /* POSIX terminal handling. */ #include #elif defined(HAVE_TERMIO_H) /* Older SYSV terminal handling - don't use if we can avoid it. */ #include #elif defined(HAVE_SYS_TERMIO_H) /* Older SYSV terminal handling - don't use if we can avoid it. */ #include #endif #endif ntdb-1.0/lib/replace/system/time.h000066400000000000000000000044611224151530700171220ustar00rootroot00000000000000#ifndef _system_time_h #define _system_time_h /* Unix SMB/CIFS implementation. time system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef TIME_WITH_SYS_TIME #include #include #else #ifdef HAVE_SYS_TIME_H #include #else #include #endif #endif #ifdef HAVE_UTIME_H #include #else struct utimbuf { time_t actime; /* access time */ time_t modtime; /* modification time */ }; #endif #ifndef HAVE_STRUCT_TIMESPEC struct timespec { time_t tv_sec; /* Seconds. */ long tv_nsec; /* Nanoseconds. 
*/ }; #endif #ifndef HAVE_MKTIME /* define is in "replace.h" */ time_t rep_mktime(struct tm *t); #endif #ifndef HAVE_TIMEGM /* define is in "replace.h" */ time_t rep_timegm(struct tm *tm); #endif #ifndef HAVE_UTIME /* define is in "replace.h" */ int rep_utime(const char *filename, const struct utimbuf *buf); #endif #ifndef HAVE_UTIMES /* define is in "replace.h" */ int rep_utimes(const char *filename, const struct timeval tv[2]); #endif #ifndef HAVE_CLOCK_GETTIME /* CLOCK_REALTIME is required by POSIX */ #define CLOCK_REALTIME 0 typedef int clockid_t; int rep_clock_gettime(clockid_t clk_id, struct timespec *tp); #endif /* make sure we have a best effort CUSTOM_CLOCK_MONOTONIC we can rely on */ #if defined(CLOCK_MONOTONIC) #define CUSTOM_CLOCK_MONOTONIC CLOCK_MONOTONIC #elif defined(CLOCK_HIGHRES) #define CUSTOM_CLOCK_MONOTONIC CLOCK_HIGHRES #else #define CUSTOM_CLOCK_MONOTONIC CLOCK_REALTIME #endif #endif ntdb-1.0/lib/replace/system/wait.h000066400000000000000000000025711224151530700171300ustar00rootroot00000000000000#ifndef _system_wait_h #define _system_wait_h /* Unix SMB/CIFS implementation. waitpid system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #ifdef HAVE_SYS_WAIT_H #include #endif #include #ifndef SIGCLD #define SIGCLD SIGCHLD #endif #ifdef HAVE_SETJMP_H #include #endif #ifdef HAVE_SYS_UCONTEXT_H #include #endif #if !defined(HAVE_SIG_ATOMIC_T_TYPE) typedef int sig_atomic_t; #endif #if !defined(HAVE_WAITPID) && defined(HAVE_WAIT4) int rep_waitpid(pid_t pid,int *status,int options) #endif #endif ntdb-1.0/lib/replace/system/wscript_configure000066400000000000000000000023031224151530700214630ustar00rootroot00000000000000#!/usr/bin/env python conf.CHECK_HEADERS('sys/capability.h') conf.CHECK_FUNCS('getpwnam_r getpwuid_r getpwent_r') # solaris varients of getXXent_r conf.CHECK_C_PROTOTYPE('getpwent_r', 'struct passwd *getpwent_r(struct passwd *src, char *buf, int buflen)', define='SOLARIS_GETPWENT_R', headers='pwd.h') conf.CHECK_C_PROTOTYPE('getgrent_r', 'struct group *getgrent_r(struct group *src, char *buf, int buflen)', define='SOLARIS_GETGRENT_R', headers='grp.h') # the irix varients conf.CHECK_C_PROTOTYPE('getpwent_r', 'struct passwd *getpwent_r(struct passwd *src, char *buf, size_t buflen)', define='SOLARIS_GETPWENT_R', headers='pwd.h') conf.CHECK_C_PROTOTYPE('getgrent_r', 'struct group *getgrent_r(struct group *src, char *buf, size_t buflen)', define='SOLARIS_GETGRENT_R', headers='grp.h') conf.CHECK_FUNCS('getgrouplist') conf.CHECK_HEADERS('ctype.h locale.h langinfo.h') conf.CHECK_HEADERS('fnmatch.h locale.h langinfo.h') conf.CHECK_HEADERS('sys/ipc.h sys/mman.h sys/shm.h') conf.CHECK_HEADERS('termios.h termio.h sys/termio.h') ntdb-1.0/lib/replace/test/000077500000000000000000000000001224151530700154415ustar00rootroot00000000000000ntdb-1.0/lib/replace/test/getifaddrs.c000066400000000000000000000050451224151530700177250ustar00rootroot00000000000000/* * Unix SMB/CIFS implementation. * * libreplace getifaddrs test * * Copyright (C) Michael Adam 2008 * * ** NOTE! The following LGPL license applies to the replace * ** library. 
This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #ifndef AUTOCONF_TEST #include "replace.h" #include "system/network.h" #include "replace-test.h" #endif #ifdef HAVE_INET_NTOP #define rep_inet_ntop inet_ntop #endif static const char *format_sockaddr(struct sockaddr *addr, char *addrstring, socklen_t addrlen) { const char *result = NULL; if (addr->sa_family == AF_INET) { result = rep_inet_ntop(AF_INET, &((struct sockaddr_in *)addr)->sin_addr, addrstring, addrlen); #ifdef HAVE_STRUCT_SOCKADDR_IN6 } else if (addr->sa_family == AF_INET6) { result = rep_inet_ntop(AF_INET6, &((struct sockaddr_in6 *)addr)->sin6_addr, addrstring, addrlen); #endif } return result; } int getifaddrs_test(void) { struct ifaddrs *ifs = NULL; struct ifaddrs *ifs_head = NULL; int ret; ret = getifaddrs(&ifs); ifs_head = ifs; if (ret != 0) { fprintf(stderr, "getifaddrs() failed: %s\n", strerror(errno)); return 1; } while (ifs) { printf("%-10s ", ifs->ifa_name); if (ifs->ifa_addr != NULL) { char addrstring[INET6_ADDRSTRLEN]; const char *result; result = format_sockaddr(ifs->ifa_addr, addrstring, sizeof(addrstring)); if (result != NULL) { printf("IP=%s ", addrstring); } if (ifs->ifa_netmask != NULL) { result = format_sockaddr(ifs->ifa_netmask, addrstring, sizeof(addrstring)); if (result != NULL) { printf("NETMASK=%s", addrstring); } } else { printf("AF=%d ", 
ifs->ifa_addr->sa_family); } } else { printf(""); } printf("\n"); ifs = ifs->ifa_next; } freeifaddrs(ifs_head); return 0; } ntdb-1.0/lib/replace/test/incoherent_mmap.c000066400000000000000000000034561224151530700207650ustar00rootroot00000000000000/* In OpenBSD, if you write to a file, another process doesn't see it * in its mmap. Returns with exit status 0 if that is the case, 1 if * it's coherent, and other if there's a problem. */ #include #include #include #include #include #include #include #include #include #define DATA "coherent.mmap" int main(int argc, char *argv[]) { int tochild[2], toparent[2]; int fd; volatile unsigned char *map; unsigned char *page; const char *fname = argv[1]; char c = 0; if (pipe(tochild) != 0 || pipe(toparent) != 0) err(2, "Creating pipe"); if (!fname) fname = DATA; fd = open(fname, O_RDWR|O_CREAT|O_TRUNC, 0600); if (fd < 0) err(2, "opening %s", fname); unlink(fname); switch (fork()) { case -1: err(2, "Fork"); case 0: close(tochild[1]); close(toparent[0]); /* Wait for parent to create file. */ if (read(tochild[0], &c, 1) != 1) err(2, "reading from parent"); /* Alter first byte. */ pwrite(fd, &c, 1, 0); if (write(toparent[1], &c, 1) != 1) err(2, "writing to parent"); exit(0); default: close(tochild[0]); close(toparent[1]); /* Create a file and mmap it. */ page = malloc(getpagesize()); memset(page, 0x42, getpagesize()); if (write(fd, page, getpagesize()) != getpagesize()) err(2, "writing first page"); map = mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (map == MAP_FAILED) err(2, "mapping file"); if (*map != 0x42) errx(2, "first byte isn't 0x42!"); /* Tell child to alter file. 
*/ if (write(tochild[1], &c, 1) != 1) err(2, "writing to child"); if (read(toparent[0], &c, 1) != 1) err(2, "reading from child"); if (*map) errx(0, "mmap incoherent: first byte isn't 0."); exit(1); } } ntdb-1.0/lib/replace/test/main.c000066400000000000000000000020501224151530700165260ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. libreplace tests Copyright (C) Jelmer Vernooij 2006 ** NOTE! The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include "replace.h" #include "replace-testsuite.h" int main(void) { bool ret = torture_local_replace(NULL); if (ret) return 0; return -1; } ntdb-1.0/lib/replace/test/os2_delete.c000066400000000000000000000050471224151530700176400ustar00rootroot00000000000000/* test readdir/unlink pattern that OS/2 uses tridge@samba.org July 2005 */ #include #include #include #include #include #include #include #include #include #include "replace-test.h" #define NUM_FILES 700 #define READDIR_SIZE 100 #define DELETE_SIZE 4 #define TESTDIR "test.dir" static int test_readdir_os2_delete_ret; #define FAILED(d) (printf("failure: readdir [\nFailed for %s - %d = %s\n]\n", d, errno, strerror(errno)), test_readdir_os2_delete_ret = 1, 1) #ifndef MIN #define MIN(a,b) ((a)<(b)?(a):(b)) #endif #ifdef _WIN32 #define mkdir(d,m) _mkdir(d) #endif static void cleanup(void) { /* I'm a lazy bastard */ if (system("rm -rf " TESTDIR)) { FAILED("system"); } mkdir(TESTDIR, 0700) == 0 || FAILED("mkdir"); } static void create_files(void) { int i; for (i=0;id_name); } if (i == 0) { return 0; } /* delete the first few */ for (j=0; jd_name, ".") == 0 || FAILED("match ."); de = readdir(d); strcmp(de->d_name, "..") == 0 || FAILED("match .."); while (1) { int n = os2_delete(d); if (n == 0) break; total_deleted += n; } closedir(d); fprintf(stderr, "Deleted %d files of %d\n", total_deleted, NUM_FILES); rmdir(TESTDIR) == 0 || FAILED("rmdir"); if (system("rm -rf " TESTDIR) == -1) { FAILED("system"); } return test_readdir_os2_delete_ret; } ntdb-1.0/lib/replace/test/shared_mmap.c000066400000000000000000000022251224151530700200660ustar00rootroot00000000000000/* this tests whether we can use a shared writeable mmap on a file - as needed for the mmap variant of FAST_SHARE_MODES */ #if defined(HAVE_UNISTD_H) #include #endif #include #include #include #include #define DATA "conftest.mmap" #ifndef MAP_FILE #define MAP_FILE 0 #endif main() { int *buf; int i; int fd = open(DATA,O_RDWR|O_CREAT|O_TRUNC,0666); int count=7; if 
(fd == -1) exit(1); for (i=0;i<10000;i++) { write(fd,&i,sizeof(i)); } close(fd); if (fork() == 0) { fd = open(DATA,O_RDWR); if (fd == -1) exit(1); buf = (int *)mmap(NULL, 10000*sizeof(int), (PROT_READ | PROT_WRITE), MAP_FILE | MAP_SHARED, fd, 0); while (count-- && buf[9124] != 55732) sleep(1); if (count <= 0) exit(1); buf[1763] = 7268; exit(0); } fd = open(DATA,O_RDWR); if (fd == -1) exit(1); buf = (int *)mmap(NULL, 10000*sizeof(int), (PROT_READ | PROT_WRITE), MAP_FILE | MAP_SHARED, fd, 0); if (buf == (int *)-1) exit(1); buf[9124] = 55732; while (count-- && buf[1763] != 7268) sleep(1); unlink(DATA); if (count > 0) exit(0); exit(1); } ntdb-1.0/lib/replace/test/shared_mremap.c000066400000000000000000000013271224151530700204170ustar00rootroot00000000000000/* this tests whether we can use mremap */ #if defined(HAVE_UNISTD_H) #include #endif #include #include #include #include #define DATA "conftest.mmap" #ifndef MAP_FILE #define MAP_FILE 0 #endif #ifndef MAP_FAILED #define MAP_FAILED (int *)-1 #endif main() { int *buf; int fd; int err = 1; fd = open(DATA, O_RDWR|O_CREAT|O_TRUNC, 0666); if (fd == -1) { exit(1); } buf = (int *)mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_FILE | MAP_SHARED, fd, 0); if (buf == MAP_FAILED) { goto done; } buf = mremap(buf, 0x1000, 0x2000, MREMAP_MAYMOVE); if (buf == MAP_FAILED) { goto done; } err = 0; done: close(fd); unlink(DATA); exit(err); } ntdb-1.0/lib/replace/test/snprintf.c000066400000000000000000000013451224151530700174530ustar00rootroot00000000000000void foo(const char *format, ...) 
{ va_list ap; int len; char buf[20]; long long l = 1234567890; l *= 100; va_start(ap, format); len = vsnprintf(buf, 0, format, ap); va_end(ap); if (len != 5) exit(1); va_start(ap, format); len = vsnprintf(0, 0, format, ap); va_end(ap); if (len != 5) exit(2); if (snprintf(buf, 3, "hello") != 5 || strcmp(buf, "he") != 0) exit(3); if (snprintf(buf, 20, "%lld", l) != 12 || strcmp(buf, "123456789000") != 0) exit(4); if (snprintf(buf, 20, "%zu", 123456789) != 9 || strcmp(buf, "123456789") != 0) exit(5); if (snprintf(buf, 20, "%2\$d %1\$d", 3, 4) != 3 || strcmp(buf, "4 3") != 0) exit(6); if (snprintf(buf, 20, "%s", 0) < 3) exit(7); printf("1"); exit(0); } main() { foo("hello"); } ntdb-1.0/lib/replace/test/strptime.c000066400000000000000000000066061224151530700174640ustar00rootroot00000000000000 #ifdef LIBREPLACE_CONFIGURE_TEST_STRPTIME #include #include #include #define true 1 #define false 0 #ifndef __STRING #define __STRING(x) #x #endif /* make printf a no-op */ #define printf if(0) printf #else /* LIBREPLACE_CONFIGURE_TEST_STRPTIME */ #include "replace.h" #include "system/time.h" #include "replace-test.h" #endif /* LIBREPLACE_CONFIGURE_TEST_STRPTIME */ int libreplace_test_strptime(void) { const char *s = "20070414101546Z"; char *ret; struct tm t, t2; memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2)); printf("test: strptime\n"); ret = strptime(s, "%Y%m%d%H%M%S", &t); if ( ret == NULL ) { printf("failure: strptime [\n" "returned NULL\n" "]\n"); return false; } if ( *ret != 'Z' ) { printf("failure: strptime [\n" "ret doesn't point to 'Z'\n" "]\n"); return false; } ret = strptime(s, "%Y%m%d%H%M%SZ", &t2); if ( ret == NULL ) { printf("failure: strptime [\n" "returned NULL with Z\n" "]\n"); return false; } if ( *ret != '\0' ) { printf("failure: strptime [\n" "ret doesn't point to '\\0'\n" "]\n"); return false; } #define CMP_TM_ELEMENT(t1,t2,elem) \ if (t1.elem != t2.elem) { \ printf("failure: strptime [\n" \ "result differs if the format string has a 'Z' at the end\n" \ 
"element: %s %d != %d\n" \ "]\n", \ __STRING(elen), t1.elem, t2.elem); \ return false; \ } CMP_TM_ELEMENT(t,t2,tm_sec); CMP_TM_ELEMENT(t,t2,tm_min); CMP_TM_ELEMENT(t,t2,tm_hour); CMP_TM_ELEMENT(t,t2,tm_mday); CMP_TM_ELEMENT(t,t2,tm_mon); CMP_TM_ELEMENT(t,t2,tm_year); CMP_TM_ELEMENT(t,t2,tm_wday); CMP_TM_ELEMENT(t,t2,tm_yday); CMP_TM_ELEMENT(t,t2,tm_isdst); if (t.tm_sec != 46) { printf("failure: strptime [\n" "tm_sec: expected: 46, got: %d\n" "]\n", t.tm_sec); return false; } if (t.tm_min != 15) { printf("failure: strptime [\n" "tm_min: expected: 15, got: %d\n" "]\n", t.tm_min); return false; } if (t.tm_hour != 10) { printf("failure: strptime [\n" "tm_hour: expected: 10, got: %d\n" "]\n", t.tm_hour); return false; } if (t.tm_mday != 14) { printf("failure: strptime [\n" "tm_mday: expected: 14, got: %d\n" "]\n", t.tm_mday); return false; } if (t.tm_mon != 3) { printf("failure: strptime [\n" "tm_mon: expected: 3, got: %d\n" "]\n", t.tm_mon); return false; } if (t.tm_year != 107) { printf("failure: strptime [\n" "tm_year: expected: 107, got: %d\n" "]\n", t.tm_year); return false; } if (t.tm_wday != 6) { /* saturday */ printf("failure: strptime [\n" "tm_wday: expected: 6, got: %d\n" "]\n", t.tm_wday); return false; } if (t.tm_yday != 103) { printf("failure: strptime [\n" "tm_yday: expected: 103, got: %d\n" "]\n", t.tm_yday); return false; } /* we don't test this as it depends on the host configuration if (t.tm_isdst != 0) { printf("failure: strptime [\n" "tm_isdst: expected: 0, got: %d\n" "]\n", t.tm_isdst); return false; }*/ printf("success: strptime\n"); return true; } #ifdef LIBREPLACE_CONFIGURE_TEST_STRPTIME int main (void) { int ret; ret = libreplace_test_strptime(); if (ret == false) return 1; return 0; } #endif ntdb-1.0/lib/replace/test/testsuite.c000066400000000000000000000705751224151530700176540ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. libreplace tests Copyright (C) Jelmer Vernooij 2006 ** NOTE! 
The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "replace-test.h" #include "replace-testsuite.h" /* we include all the system/ include files here so that libreplace tests them in the build farm */ #include "system/capability.h" #include "system/dir.h" #include "system/filesys.h" #include "system/glob.h" #include "system/iconv.h" #include "system/locale.h" #include "system/network.h" #include "system/passwd.h" #include "system/readline.h" #include "system/select.h" #include "system/shmem.h" #include "system/syslog.h" #include "system/terminal.h" #include "system/time.h" #include "system/wait.h" #include "system/aio.h" #define TESTFILE "testfile.dat" /* test ftruncate() function */ static int test_ftruncate(void) { struct stat st; int fd; const int size = 1234; printf("test: ftruncate\n"); unlink(TESTFILE); fd = open(TESTFILE, O_RDWR|O_CREAT, 0600); if (fd == -1) { printf("failure: ftruncate [\n" "creating '%s' failed - %s\n]\n", TESTFILE, strerror(errno)); return false; } if (ftruncate(fd, size) != 0) { printf("failure: ftruncate [\n%s\n]\n", strerror(errno)); return false; } if (fstat(fd, &st) != 0) { printf("failure: ftruncate [\nfstat failed - %s\n]\n", strerror(errno)); return false; } if (st.st_size != size) { printf("failure: ftruncate [\ngave wrong size %d - 
expected %d\n]\n", (int)st.st_size, size); return false; } unlink(TESTFILE); printf("success: ftruncate\n"); return true; } /* test strlcpy() function. see http://www.gratisoft.us/todd/papers/strlcpy.html */ static int test_strlcpy(void) { char buf[4]; const struct { const char *src; size_t result; } tests[] = { { "abc", 3 }, { "abcdef", 6 }, { "abcd", 4 }, { "", 0 }, { NULL, 0 } }; int i; printf("test: strlcpy\n"); for (i=0;tests[i].src;i++) { if (strlcpy(buf, tests[i].src, sizeof(buf)) != tests[i].result) { printf("failure: strlcpy [\ntest %d failed\n]\n", i); return false; } } printf("success: strlcpy\n"); return true; } static int test_strlcat(void) { char tmp[10]; printf("test: strlcat\n"); strlcpy(tmp, "", sizeof(tmp)); if (strlcat(tmp, "bla", 3) != 3) { printf("failure: strlcat [\ninvalid return code\n]\n"); return false; } if (strcmp(tmp, "bl") != 0) { printf("failure: strlcat [\nexpected \"bl\", got \"%s\"\n]\n", tmp); return false; } strlcpy(tmp, "da", sizeof(tmp)); if (strlcat(tmp, "me", 4) != 4) { printf("failure: strlcat [\nexpected \"dam\", got \"%s\"\n]\n", tmp); return false; } printf("success: strlcat\n"); return true; } static int test_mktime(void) { /* FIXME */ return true; } static int test_initgroups(void) { /* FIXME */ return true; } static int test_memmove(void) { /* FIXME */ return true; } static int test_strdup(void) { char *x; printf("test: strdup\n"); x = strdup("bla"); if (strcmp("bla", x) != 0) { printf("failure: strdup [\nfailed: expected \"bla\", got \"%s\"\n]\n", x); return false; } free(x); printf("success: strdup\n"); return true; } static int test_setlinebuf(void) { printf("test: setlinebuf\n"); setlinebuf(stdout); printf("success: setlinebuf\n"); return true; } static int test_vsyslog(void) { /* FIXME */ return true; } static int test_timegm(void) { /* FIXME */ return true; } static int test_setenv(void) { #define TEST_SETENV(key, value, overwrite, result) do { \ int _ret; \ char *_v; \ _ret = setenv(key, value, overwrite); \ if 
(_ret != 0) { \ printf("failure: setenv [\n" \ "setenv(%s, %s, %d) failed\n" \ "]\n", \ key, value, overwrite); \ return false; \ } \ _v=getenv(key); \ if (!_v) { \ printf("failure: setenv [\n" \ "getenv(%s) returned NULL\n" \ "]\n", \ key); \ return false; \ } \ if (strcmp(result, _v) != 0) { \ printf("failure: setenv [\n" \ "getenv(%s): '%s' != '%s'\n" \ "]\n", \ key, result, _v); \ return false; \ } \ } while(0) #define TEST_UNSETENV(key) do { \ char *_v; \ unsetenv(key); \ _v=getenv(key); \ if (_v) { \ printf("failure: setenv [\n" \ "getenv(%s): NULL != '%s'\n" \ "]\n", \ SETENVTEST_KEY, _v); \ return false; \ } \ } while (0) #define SETENVTEST_KEY "SETENVTESTKEY" #define SETENVTEST_VAL "SETENVTESTVAL" printf("test: setenv\n"); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"1", 0, SETENVTEST_VAL"1"); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"2", 0, SETENVTEST_VAL"1"); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"3", 1, SETENVTEST_VAL"3"); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"4", 1, SETENVTEST_VAL"4"); TEST_UNSETENV(SETENVTEST_KEY); TEST_UNSETENV(SETENVTEST_KEY); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"5", 0, SETENVTEST_VAL"5"); TEST_UNSETENV(SETENVTEST_KEY); TEST_UNSETENV(SETENVTEST_KEY); printf("success: setenv\n"); return true; } static int test_strndup(void) { char *x; printf("test: strndup\n"); x = strndup("bla", 0); if (strcmp(x, "") != 0) { printf("failure: strndup [\ninvalid\n]\n"); return false; } free(x); x = strndup("bla", 2); if (strcmp(x, "bl") != 0) { printf("failure: strndup [\ninvalid\n]\n"); return false; } free(x); x = strndup("bla", 10); if (strcmp(x, "bla") != 0) { printf("failure: strndup [\ninvalid\n]\n"); return false; } free(x); printf("success: strndup\n"); return true; } static int test_strnlen(void) { printf("test: strnlen\n"); if (strnlen("bla", 2) != 2) { printf("failure: strnlen [\nunexpected length\n]\n"); return false; } if (strnlen("some text\n", 0) != 0) { printf("failure: strnlen [\nunexpected length\n]\n"); return false; } 
if (strnlen("some text", 20) != 9) { printf("failure: strnlen [\nunexpected length\n]\n"); return false; } printf("success: strnlen\n"); return true; } static int test_waitpid(void) { /* FIXME */ return true; } static int test_seteuid(void) { /* FIXME */ return true; } static int test_setegid(void) { /* FIXME */ return true; } static int test_asprintf(void) { char *x; printf("test: asprintf\n"); if (asprintf(&x, "%d", 9) != 1) { printf("failure: asprintf [\ngenerate asprintf\n]\n"); return false; } if (strcmp(x, "9") != 0) { printf("failure: asprintf [\ngenerate asprintf\n]\n"); return false; } if (asprintf(&x, "dat%s", "a") != 4) { printf("failure: asprintf [\ngenerate asprintf\n]\n"); return false; } if (strcmp(x, "data") != 0) { printf("failure: asprintf [\ngenerate asprintf\n]\n"); return false; } printf("success: asprintf\n"); return true; } static int test_snprintf(void) { char tmp[10]; printf("test: snprintf\n"); if (snprintf(tmp, 3, "foo%d", 9) != 4) { printf("failure: snprintf [\nsnprintf return code failed\n]\n"); return false; } if (strcmp(tmp, "fo") != 0) { printf("failure: snprintf [\nsnprintf failed\n]\n"); return false; } printf("success: snprintf\n"); return true; } static int test_vasprintf(void) { /* FIXME */ return true; } static int test_vsnprintf(void) { /* FIXME */ return true; } static int test_opendir(void) { /* FIXME */ return true; } static int test_readdir(void) { printf("test: readdir\n"); if (test_readdir_os2_delete() != 0) { return false; } printf("success: readdir\n"); return true; } static int test_telldir(void) { /* FIXME */ return true; } static int test_seekdir(void) { /* FIXME */ return true; } static int test_dlopen(void) { /* FIXME: test dlopen, dlsym, dlclose, dlerror */ return true; } static int test_chroot(void) { /* FIXME: chroot() */ return true; } static int test_bzero(void) { /* FIXME: bzero */ return true; } static int test_strerror(void) { /* FIXME */ return true; } static int test_errno(void) { printf("test: 
errno\n"); errno = 3; if (errno != 3) { printf("failure: errno [\nerrno failed\n]\n"); return false; } printf("success: errno\n"); return true; } static int test_mkdtemp(void) { /* FIXME */ return true; } static int test_mkstemp(void) { /* FIXME */ return true; } static int test_pread(void) { /* FIXME */ return true; } static int test_pwrite(void) { /* FIXME */ return true; } static int test_inet_ntoa(void) { /* FIXME */ return true; } #define TEST_STRTO_X(type,fmt,func,str,base,res,diff,rrnoo) do {\ type _v; \ char _s[64]; \ char *_p = NULL;\ char *_ep = NULL; \ strlcpy(_s, str, sizeof(_s));\ if (diff >= 0) { \ _ep = &_s[diff]; \ } \ errno = 0; \ _v = func(_s, &_p, base); \ if (errno != rrnoo) { \ printf("failure: %s [\n" \ "\t%s\n" \ "\t%s(\"%s\",%d,%d): " fmt " (=/!)= " fmt "\n" \ "\terrno: %d != %d\n" \ "]\n", \ __STRING(func), __location__, __STRING(func), \ str, diff, base, res, _v, rrnoo, errno); \ return false; \ } else if (_v != res) { \ printf("failure: %s [\n" \ "\t%s\n" \ "\t%s(\"%s\",%d,%d): " fmt " != " fmt "\n" \ "]\n", \ __STRING(func), __location__, __STRING(func), \ str, diff, base, res, _v); \ return false; \ } else if (_p != _ep) { \ printf("failure: %s [\n" \ "\t%s\n" \ "\t%s(\"%s\",%d,%d): " fmt " (=/!)= " fmt "\n" \ "\tptr: %p - %p = %d != %d\n" \ "]\n", \ __STRING(func), __location__, __STRING(func), \ str, diff, base, res, _v, _ep, _p, (int)(diff - (_ep - _p)), diff); \ return false; \ } \ } while (0) static int test_strtoll(void) { printf("test: strtoll\n"); #define TEST_STRTOLL(str,base,res,diff,errnoo) TEST_STRTO_X(long long int, "%lld", strtoll,str,base,res,diff,errnoo) TEST_STRTOLL("15", 10, 15LL, 2, 0); TEST_STRTOLL(" 15", 10, 15LL, 4, 0); TEST_STRTOLL("15", 0, 15LL, 2, 0); TEST_STRTOLL(" 15 ", 0, 15LL, 3, 0); TEST_STRTOLL("+15", 10, 15LL, 3, 0); TEST_STRTOLL(" +15", 10, 15LL, 5, 0); TEST_STRTOLL("+15", 0, 15LL, 3, 0); TEST_STRTOLL(" +15 ", 0, 15LL, 4, 0); TEST_STRTOLL("-15", 10, -15LL, 3, 0); TEST_STRTOLL(" -15", 10, -15LL, 5, 0); 
TEST_STRTOLL("-15", 0, -15LL, 3, 0); TEST_STRTOLL(" -15 ", 0, -15LL, 4, 0); TEST_STRTOLL("015", 10, 15LL, 3, 0); TEST_STRTOLL(" 015", 10, 15LL, 5, 0); TEST_STRTOLL("015", 0, 13LL, 3, 0); TEST_STRTOLL(" 015", 0, 13LL, 5, 0); TEST_STRTOLL("0x15", 10, 0LL, 1, 0); TEST_STRTOLL(" 0x15", 10, 0LL, 3, 0); TEST_STRTOLL("0x15", 0, 21LL, 4, 0); TEST_STRTOLL(" 0x15", 0, 21LL, 6, 0); TEST_STRTOLL("10", 16, 16LL, 2, 0); TEST_STRTOLL(" 10 ", 16, 16LL, 4, 0); TEST_STRTOLL("0x10", 16, 16LL, 4, 0); TEST_STRTOLL("0x10", 0, 16LL, 4, 0); TEST_STRTOLL(" 0x10 ", 0, 16LL, 5, 0); TEST_STRTOLL("+10", 16, 16LL, 3, 0); TEST_STRTOLL(" +10 ", 16, 16LL, 5, 0); TEST_STRTOLL("+0x10", 16, 16LL, 5, 0); TEST_STRTOLL("+0x10", 0, 16LL, 5, 0); TEST_STRTOLL(" +0x10 ", 0, 16LL, 6, 0); TEST_STRTOLL("-10", 16, -16LL, 3, 0); TEST_STRTOLL(" -10 ", 16, -16LL, 5, 0); TEST_STRTOLL("-0x10", 16, -16LL, 5, 0); TEST_STRTOLL("-0x10", 0, -16LL, 5, 0); TEST_STRTOLL(" -0x10 ", 0, -16LL, 6, 0); TEST_STRTOLL("010", 16, 16LL, 3, 0); TEST_STRTOLL(" 010 ", 16, 16LL, 5, 0); TEST_STRTOLL("-010", 16, -16LL, 4, 0); TEST_STRTOLL("11", 8, 9LL, 2, 0); TEST_STRTOLL("011", 8, 9LL, 3, 0); TEST_STRTOLL("011", 0, 9LL, 3, 0); TEST_STRTOLL("-11", 8, -9LL, 3, 0); TEST_STRTOLL("-011", 8, -9LL, 4, 0); TEST_STRTOLL("-011", 0, -9LL, 4, 0); TEST_STRTOLL("011", 8, 9LL, 3, 0); TEST_STRTOLL("011", 0, 9LL, 3, 0); TEST_STRTOLL("-11", 8, -9LL, 3, 0); TEST_STRTOLL("-011", 8, -9LL, 4, 0); TEST_STRTOLL("-011", 0, -9LL, 4, 0); TEST_STRTOLL("Text", 0, 0LL, 0, 0); TEST_STRTOLL("9223372036854775807", 10, 9223372036854775807LL, 19, 0); TEST_STRTOLL("9223372036854775807", 0, 9223372036854775807LL, 19, 0); TEST_STRTOLL("9223372036854775808", 0, 9223372036854775807LL, 19, ERANGE); TEST_STRTOLL("9223372036854775808", 10, 9223372036854775807LL, 19, ERANGE); TEST_STRTOLL("0x7FFFFFFFFFFFFFFF", 0, 9223372036854775807LL, 18, 0); TEST_STRTOLL("0x7FFFFFFFFFFFFFFF", 16, 9223372036854775807LL, 18, 0); TEST_STRTOLL("7FFFFFFFFFFFFFFF", 16, 9223372036854775807LL, 16, 0); 
TEST_STRTOLL("0x8000000000000000", 0, 9223372036854775807LL, 18, ERANGE); TEST_STRTOLL("0x8000000000000000", 16, 9223372036854775807LL, 18, ERANGE); TEST_STRTOLL("80000000000000000", 16, 9223372036854775807LL, 17, ERANGE); TEST_STRTOLL("0777777777777777777777", 0, 9223372036854775807LL, 22, 0); TEST_STRTOLL("0777777777777777777777", 8, 9223372036854775807LL, 22, 0); TEST_STRTOLL("777777777777777777777", 8, 9223372036854775807LL, 21, 0); TEST_STRTOLL("01000000000000000000000", 0, 9223372036854775807LL, 23, ERANGE); TEST_STRTOLL("01000000000000000000000", 8, 9223372036854775807LL, 23, ERANGE); TEST_STRTOLL("1000000000000000000000", 8, 9223372036854775807LL, 22, ERANGE); TEST_STRTOLL("-9223372036854775808", 10, -9223372036854775807LL -1, 20, 0); TEST_STRTOLL("-9223372036854775808", 0, -9223372036854775807LL -1, 20, 0); TEST_STRTOLL("-9223372036854775809", 0, -9223372036854775807LL -1, 20, ERANGE); TEST_STRTOLL("-9223372036854775809", 10, -9223372036854775807LL -1, 20, ERANGE); TEST_STRTOLL("-0x8000000000000000", 0, -9223372036854775807LL -1, 19, 0); TEST_STRTOLL("-0x8000000000000000", 16, -9223372036854775807LL -1, 19, 0); TEST_STRTOLL("-8000000000000000", 16, -9223372036854775807LL -1, 17, 0); TEST_STRTOLL("-0x8000000000000001", 0, -9223372036854775807LL -1, 19, ERANGE); TEST_STRTOLL("-0x8000000000000001", 16, -9223372036854775807LL -1, 19, ERANGE); TEST_STRTOLL("-80000000000000001", 16, -9223372036854775807LL -1, 18, ERANGE); TEST_STRTOLL("-01000000000000000000000",0, -9223372036854775807LL -1, 24, 0); TEST_STRTOLL("-01000000000000000000000",8, -9223372036854775807LL -1, 24, 0); TEST_STRTOLL("-1000000000000000000000", 8, -9223372036854775807LL -1, 23, 0); TEST_STRTOLL("-01000000000000000000001",0, -9223372036854775807LL -1, 24, ERANGE); TEST_STRTOLL("-01000000000000000000001",8, -9223372036854775807LL -1, 24, ERANGE); TEST_STRTOLL("-1000000000000000000001", 8, -9223372036854775807LL -1, 23, ERANGE); printf("success: strtoll\n"); return true; } static int 
test_strtoull(void) { printf("test: strtoull\n"); #define TEST_STRTOULL(str,base,res,diff,errnoo) TEST_STRTO_X(long long unsigned int,"%llu",strtoull,str,base,res,diff,errnoo) TEST_STRTOULL("15", 10, 15LLU, 2, 0); TEST_STRTOULL(" 15", 10, 15LLU, 4, 0); TEST_STRTOULL("15", 0, 15LLU, 2, 0); TEST_STRTOULL(" 15 ", 0, 15LLU, 3, 0); TEST_STRTOULL("+15", 10, 15LLU, 3, 0); TEST_STRTOULL(" +15", 10, 15LLU, 5, 0); TEST_STRTOULL("+15", 0, 15LLU, 3, 0); TEST_STRTOULL(" +15 ", 0, 15LLU, 4, 0); TEST_STRTOULL("-15", 10, 18446744073709551601LLU, 3, 0); TEST_STRTOULL(" -15", 10, 18446744073709551601LLU, 5, 0); TEST_STRTOULL("-15", 0, 18446744073709551601LLU, 3, 0); TEST_STRTOULL(" -15 ", 0, 18446744073709551601LLU, 4, 0); TEST_STRTOULL("015", 10, 15LLU, 3, 0); TEST_STRTOULL(" 015", 10, 15LLU, 5, 0); TEST_STRTOULL("015", 0, 13LLU, 3, 0); TEST_STRTOULL(" 015", 0, 13LLU, 5, 0); TEST_STRTOULL("0x15", 10, 0LLU, 1, 0); TEST_STRTOULL(" 0x15", 10, 0LLU, 3, 0); TEST_STRTOULL("0x15", 0, 21LLU, 4, 0); TEST_STRTOULL(" 0x15", 0, 21LLU, 6, 0); TEST_STRTOULL("10", 16, 16LLU, 2, 0); TEST_STRTOULL(" 10 ", 16, 16LLU, 4, 0); TEST_STRTOULL("0x10", 16, 16LLU, 4, 0); TEST_STRTOULL("0x10", 0, 16LLU, 4, 0); TEST_STRTOULL(" 0x10 ", 0, 16LLU, 5, 0); TEST_STRTOULL("+10", 16, 16LLU, 3, 0); TEST_STRTOULL(" +10 ", 16, 16LLU, 5, 0); TEST_STRTOULL("+0x10", 16, 16LLU, 5, 0); TEST_STRTOULL("+0x10", 0, 16LLU, 5, 0); TEST_STRTOULL(" +0x10 ", 0, 16LLU, 6, 0); TEST_STRTOULL("-10", 16, -16LLU, 3, 0); TEST_STRTOULL(" -10 ", 16, -16LLU, 5, 0); TEST_STRTOULL("-0x10", 16, -16LLU, 5, 0); TEST_STRTOULL("-0x10", 0, -16LLU, 5, 0); TEST_STRTOULL(" -0x10 ", 0, -16LLU, 6, 0); TEST_STRTOULL("010", 16, 16LLU, 3, 0); TEST_STRTOULL(" 010 ", 16, 16LLU, 5, 0); TEST_STRTOULL("-010", 16, -16LLU, 4, 0); TEST_STRTOULL("11", 8, 9LLU, 2, 0); TEST_STRTOULL("011", 8, 9LLU, 3, 0); TEST_STRTOULL("011", 0, 9LLU, 3, 0); TEST_STRTOULL("-11", 8, -9LLU, 3, 0); TEST_STRTOULL("-011", 8, -9LLU, 4, 0); TEST_STRTOULL("-011", 0, -9LLU, 4, 0); 
TEST_STRTOULL("011", 8, 9LLU, 3, 0); TEST_STRTOULL("011", 0, 9LLU, 3, 0); TEST_STRTOULL("-11", 8, -9LLU, 3, 0); TEST_STRTOULL("-011", 8, -9LLU, 4, 0); TEST_STRTOULL("-011", 0, -9LLU, 4, 0); TEST_STRTOULL("Text", 0, 0LLU, 0, 0); TEST_STRTOULL("9223372036854775807", 10, 9223372036854775807LLU, 19, 0); TEST_STRTOULL("9223372036854775807", 0, 9223372036854775807LLU, 19, 0); TEST_STRTOULL("9223372036854775808", 0, 9223372036854775808LLU, 19, 0); TEST_STRTOULL("9223372036854775808", 10, 9223372036854775808LLU, 19, 0); TEST_STRTOULL("0x7FFFFFFFFFFFFFFF", 0, 9223372036854775807LLU, 18, 0); TEST_STRTOULL("0x7FFFFFFFFFFFFFFF", 16, 9223372036854775807LLU, 18, 0); TEST_STRTOULL("7FFFFFFFFFFFFFFF", 16, 9223372036854775807LLU, 16, 0); TEST_STRTOULL("0x8000000000000000", 0, 9223372036854775808LLU, 18, 0); TEST_STRTOULL("0x8000000000000000", 16, 9223372036854775808LLU, 18, 0); TEST_STRTOULL("8000000000000000", 16, 9223372036854775808LLU, 16, 0); TEST_STRTOULL("0777777777777777777777", 0, 9223372036854775807LLU, 22, 0); TEST_STRTOULL("0777777777777777777777", 8, 9223372036854775807LLU, 22, 0); TEST_STRTOULL("777777777777777777777", 8, 9223372036854775807LLU, 21, 0); TEST_STRTOULL("01000000000000000000000",0, 9223372036854775808LLU, 23, 0); TEST_STRTOULL("01000000000000000000000",8, 9223372036854775808LLU, 23, 0); TEST_STRTOULL("1000000000000000000000", 8, 9223372036854775808LLU, 22, 0); TEST_STRTOULL("-9223372036854775808", 10, 9223372036854775808LLU, 20, 0); TEST_STRTOULL("-9223372036854775808", 0, 9223372036854775808LLU, 20, 0); TEST_STRTOULL("-9223372036854775809", 0, 9223372036854775807LLU, 20, 0); TEST_STRTOULL("-9223372036854775809", 10, 9223372036854775807LLU, 20, 0); TEST_STRTOULL("-0x8000000000000000", 0, 9223372036854775808LLU, 19, 0); TEST_STRTOULL("-0x8000000000000000", 16, 9223372036854775808LLU, 19, 0); TEST_STRTOULL("-8000000000000000", 16, 9223372036854775808LLU, 17, 0); TEST_STRTOULL("-0x8000000000000001", 0, 9223372036854775807LLU, 19, 0); 
TEST_STRTOULL("-0x8000000000000001", 16, 9223372036854775807LLU, 19, 0); TEST_STRTOULL("-8000000000000001", 16, 9223372036854775807LLU, 17, 0); TEST_STRTOULL("-01000000000000000000000",0, 9223372036854775808LLU, 24, 0); TEST_STRTOULL("-01000000000000000000000",8, 9223372036854775808LLU, 24, 0); TEST_STRTOULL("-1000000000000000000000",8, 9223372036854775808LLU, 23, 0); TEST_STRTOULL("-01000000000000000000001",0, 9223372036854775807LLU, 24, 0); TEST_STRTOULL("-01000000000000000000001",8, 9223372036854775807LLU, 24, 0); TEST_STRTOULL("-1000000000000000000001",8, 9223372036854775807LLU, 23, 0); TEST_STRTOULL("18446744073709551615", 0, 18446744073709551615LLU, 20, 0); TEST_STRTOULL("18446744073709551615", 10, 18446744073709551615LLU, 20, 0); TEST_STRTOULL("18446744073709551616", 0, 18446744073709551615LLU, 20, ERANGE); TEST_STRTOULL("18446744073709551616", 10, 18446744073709551615LLU, 20, ERANGE); TEST_STRTOULL("0xFFFFFFFFFFFFFFFF", 0, 18446744073709551615LLU, 18, 0); TEST_STRTOULL("0xFFFFFFFFFFFFFFFF", 16, 18446744073709551615LLU, 18, 0); TEST_STRTOULL("FFFFFFFFFFFFFFFF", 16, 18446744073709551615LLU, 16, 0); TEST_STRTOULL("0x10000000000000000", 0, 18446744073709551615LLU, 19, ERANGE); TEST_STRTOULL("0x10000000000000000", 16, 18446744073709551615LLU, 19, ERANGE); TEST_STRTOULL("10000000000000000", 16, 18446744073709551615LLU, 17, ERANGE); TEST_STRTOULL("01777777777777777777777",0, 18446744073709551615LLU, 23, 0); TEST_STRTOULL("01777777777777777777777",8, 18446744073709551615LLU, 23, 0); TEST_STRTOULL("1777777777777777777777", 8, 18446744073709551615LLU, 22, 0); TEST_STRTOULL("02000000000000000000000",0, 18446744073709551615LLU, 23, ERANGE); TEST_STRTOULL("02000000000000000000000",8, 18446744073709551615LLU, 23, ERANGE); TEST_STRTOULL("2000000000000000000000", 8, 18446744073709551615LLU, 22, ERANGE); TEST_STRTOULL("-18446744073709551615", 0, 1LLU, 21, 0); TEST_STRTOULL("-18446744073709551615", 10, 1LLU, 21, 0); TEST_STRTOULL("-18446744073709551616", 0, 
18446744073709551615LLU, 21, ERANGE); TEST_STRTOULL("-18446744073709551616", 10, 18446744073709551615LLU, 21, ERANGE); TEST_STRTOULL("-0xFFFFFFFFFFFFFFFF", 0, 1LLU, 19, 0); TEST_STRTOULL("-0xFFFFFFFFFFFFFFFF", 16, 1LLU, 19, 0); TEST_STRTOULL("-FFFFFFFFFFFFFFFF", 16, 1LLU, 17, 0); TEST_STRTOULL("-0x10000000000000000", 0, 18446744073709551615LLU, 20, ERANGE); TEST_STRTOULL("-0x10000000000000000", 16, 18446744073709551615LLU, 20, ERANGE); TEST_STRTOULL("-10000000000000000", 16, 18446744073709551615LLU, 18, ERANGE); TEST_STRTOULL("-01777777777777777777777",0, 1LLU, 24, 0); TEST_STRTOULL("-01777777777777777777777",8, 1LLU, 24, 0); TEST_STRTOULL("-1777777777777777777777",8, 1LLU, 23, 0); TEST_STRTOULL("-02000000000000000000000",0, 18446744073709551615LLU, 24, ERANGE); TEST_STRTOULL("-02000000000000000000000",8, 18446744073709551615LLU, 24, ERANGE); TEST_STRTOULL("-2000000000000000000000",8, 18446744073709551615LLU, 23, ERANGE); printf("success: strtoull\n"); return true; } /* FIXME: Types: bool socklen_t uint{8,16,32,64}_t int{8,16,32,64}_t intptr_t Constants: PATH_NAME_MAX UINT{16,32,64}_MAX INT32_MAX */ static int test_va_copy(void) { /* FIXME */ return true; } static int test_FUNCTION(void) { printf("test: FUNCTION\n"); if (strcmp(__FUNCTION__, "test_FUNCTION") != 0) { printf("failure: FUNCTION [\nFUNCTION invalid\n]\n"); return false; } printf("success: FUNCTION\n"); return true; } static int test_MIN(void) { printf("test: MIN\n"); if (MIN(20, 1) != 1) { printf("failure: MIN [\nMIN invalid\n]\n"); return false; } if (MIN(1, 20) != 1) { printf("failure: MIN [\nMIN invalid\n]\n"); return false; } printf("success: MIN\n"); return true; } static int test_MAX(void) { printf("test: MAX\n"); if (MAX(20, 1) != 20) { printf("failure: MAX [\nMAX invalid\n]\n"); return false; } if (MAX(1, 20) != 20) { printf("failure: MAX [\nMAX invalid\n]\n"); return false; } printf("success: MAX\n"); return true; } static int test_socketpair(void) { int sock[2]; char buf[20]; printf("test: 
socketpair\n"); if (socketpair(AF_UNIX, SOCK_STREAM, 0, sock) == -1) { printf("failure: socketpair [\n" "socketpair() failed\n" "]\n"); return false; } if (write(sock[1], "automatisch", 12) == -1) { printf("failure: socketpair [\n" "write() failed: %s\n" "]\n", strerror(errno)); return false; } if (read(sock[0], buf, 12) == -1) { printf("failure: socketpair [\n" "read() failed: %s\n" "]\n", strerror(errno)); return false; } if (strcmp(buf, "automatisch") != 0) { printf("failure: socketpair [\n" "expected: automatisch, got: %s\n" "]\n", buf); return false; } printf("success: socketpair\n"); return true; } extern int libreplace_test_strptime(void); static int test_strptime(void) { return libreplace_test_strptime(); } extern int getifaddrs_test(void); static int test_getifaddrs(void) { printf("test: getifaddrs\n"); if (getifaddrs_test() != 0) { printf("failure: getifaddrs\n"); return false; } printf("success: getifaddrs\n"); return true; } static int test_utime(void) { struct utimbuf u; struct stat st1, st2, st3; int fd; printf("test: utime\n"); unlink(TESTFILE); fd = open(TESTFILE, O_RDWR|O_CREAT, 0600); if (fd == -1) { printf("failure: utime [\n" "creating '%s' failed - %s\n]\n", TESTFILE, strerror(errno)); return false; } if (fstat(fd, &st1) != 0) { printf("failure: utime [\n" "fstat (1) failed - %s\n]\n", strerror(errno)); return false; } u.actime = st1.st_atime + 300; u.modtime = st1.st_mtime - 300; if (utime(TESTFILE, &u) != 0) { printf("failure: utime [\n" "utime(&u) failed - %s\n]\n", strerror(errno)); return false; } if (fstat(fd, &st2) != 0) { printf("failure: utime [\n" "fstat (2) failed - %s\n]\n", strerror(errno)); return false; } if (utime(TESTFILE, NULL) != 0) { printf("failure: utime [\n" "utime(NULL) failed - %s\n]\n", strerror(errno)); return false; } if (fstat(fd, &st3) != 0) { printf("failure: utime [\n" "fstat (3) failed - %s\n]\n", strerror(errno)); return false; } #define CMP_VAL(a,c,b) do { \ if (a c b) { \ printf("failure: utime [\n" \ "%s: 
%s(%d) %s %s(%d)\n]\n", \ __location__, \ #a, (int)a, #c, #b, (int)b); \ return false; \ } \ } while(0) #define EQUAL_VAL(a,b) CMP_VAL(a,!=,b) #define GREATER_VAL(a,b) CMP_VAL(a,<=,b) #define LESSER_VAL(a,b) CMP_VAL(a,>=,b) EQUAL_VAL(st2.st_atime, st1.st_atime + 300); EQUAL_VAL(st2.st_mtime, st1.st_mtime - 300); LESSER_VAL(st3.st_atime, st2.st_atime); GREATER_VAL(st3.st_mtime, st2.st_mtime); #undef CMP_VAL #undef EQUAL_VAL #undef GREATER_VAL #undef LESSER_VAL unlink(TESTFILE); printf("success: utime\n"); return true; } static int test_utimes(void) { struct timeval tv[2]; struct stat st1, st2; int fd; printf("test: utimes\n"); unlink(TESTFILE); fd = open(TESTFILE, O_RDWR|O_CREAT, 0600); if (fd == -1) { printf("failure: utimes [\n" "creating '%s' failed - %s\n]\n", TESTFILE, strerror(errno)); return false; } if (fstat(fd, &st1) != 0) { printf("failure: utimes [\n" "fstat (1) failed - %s\n]\n", strerror(errno)); return false; } ZERO_STRUCT(tv); tv[0].tv_sec = st1.st_atime + 300; tv[1].tv_sec = st1.st_mtime - 300; if (utimes(TESTFILE, tv) != 0) { printf("failure: utimes [\n" "utimes(tv) failed - %s\n]\n", strerror(errno)); return false; } if (fstat(fd, &st2) != 0) { printf("failure: utimes [\n" "fstat (2) failed - %s\n]\n", strerror(errno)); return false; } #define EQUAL_VAL(a,b) do { \ if (a != b) { \ printf("failure: utimes [\n" \ "%s: %s(%d) != %s(%d)\n]\n", \ __location__, \ #a, (int)a, #b, (int)b); \ return false; \ } \ } while(0) EQUAL_VAL(st2.st_atime, st1.st_atime + 300); EQUAL_VAL(st2.st_mtime, st1.st_mtime - 300); #undef EQUAL_VAL unlink(TESTFILE); printf("success: utimes\n"); return true; } static int test_memmem(void) { char *s; printf("test: memmem\n"); s = (char *)memmem("foo", 3, "fo", 2); if (strcmp(s, "foo") != 0) { printf(__location__ ": Failed memmem\n"); return false; } s = (char *)memmem("foo", 3, "", 0); /* it is allowable for this to return NULL (as happens on FreeBSD) */ if (s && strcmp(s, "foo") != 0) { printf(__location__ ": Failed memmem\n"); 
return false; } s = (char *)memmem("foo", 4, "o", 1); if (strcmp(s, "oo") != 0) { printf(__location__ ": Failed memmem\n"); return false; } s = (char *)memmem("foobarfodx", 11, "fod", 3); if (strcmp(s, "fodx") != 0) { printf(__location__ ": Failed memmem\n"); return false; } printf("success: memmem\n"); return true; } bool torture_local_replace(struct torture_context *ctx) { bool ret = true; ret &= test_ftruncate(); ret &= test_strlcpy(); ret &= test_strlcat(); ret &= test_mktime(); ret &= test_initgroups(); ret &= test_memmove(); ret &= test_strdup(); ret &= test_setlinebuf(); ret &= test_vsyslog(); ret &= test_timegm(); ret &= test_setenv(); ret &= test_strndup(); ret &= test_strnlen(); ret &= test_waitpid(); ret &= test_seteuid(); ret &= test_setegid(); ret &= test_asprintf(); ret &= test_snprintf(); ret &= test_vasprintf(); ret &= test_vsnprintf(); ret &= test_opendir(); ret &= test_readdir(); ret &= test_telldir(); ret &= test_seekdir(); ret &= test_dlopen(); ret &= test_chroot(); ret &= test_bzero(); ret &= test_strerror(); ret &= test_errno(); ret &= test_mkdtemp(); ret &= test_mkstemp(); ret &= test_pread(); ret &= test_pwrite(); ret &= test_inet_ntoa(); ret &= test_strtoll(); ret &= test_strtoull(); ret &= test_va_copy(); ret &= test_FUNCTION(); ret &= test_MIN(); ret &= test_MAX(); ret &= test_socketpair(); ret &= test_strptime(); ret &= test_getifaddrs(); ret &= test_utime(); ret &= test_utimes(); ret &= test_memmem(); return ret; } ntdb-1.0/lib/replace/timegm.c000066400000000000000000000047641224151530700161230ustar00rootroot00000000000000/* * Copyright (c) 1997 Kungliga Tekniska Högskolan * (Royal Institute of Technology, Stockholm, Sweden). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Institute nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* adapted for Samba4 by Andrew Tridgell */ #include "replace.h" #include "system/time.h" static int is_leap(unsigned y) { y += 1900; return (y % 4) == 0 && ((y % 100) != 0 || (y % 400) == 0); } time_t rep_timegm(struct tm *tm) { static const unsigned ndays[2][12] ={ {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}}; time_t res = 0; unsigned i; if (tm->tm_mon > 12 || tm->tm_mon < 0 || tm->tm_mday > 31 || tm->tm_min > 60 || tm->tm_sec > 60 || tm->tm_hour > 24) { /* invalid tm structure */ return 0; } for (i = 70; i < tm->tm_year; ++i) res += is_leap(i) ? 
366 : 365; for (i = 0; i < tm->tm_mon; ++i) res += ndays[is_leap(tm->tm_year)][i]; res += tm->tm_mday - 1; res *= 24; res += tm->tm_hour; res *= 60; res += tm->tm_min; res *= 60; res += tm->tm_sec; return res; } ntdb-1.0/lib/replace/win32_replace.h000066400000000000000000000104641224151530700172750ustar00rootroot00000000000000#ifndef _WIN32_REPLACE_H #define _WIN32_REPLACE_H #ifdef HAVE_WINSOCK2_H #include #endif #ifdef HAVE_WS2TCPIP_H #include #endif #ifdef HAVE_WINDOWS_H #include #endif /* Map BSD Socket errorcodes to the WSA errorcodes (if possible) */ #define EAFNOSUPPORT WSAEAFNOSUPPORT #define ECONNREFUSED WSAECONNREFUSED #define EINPROGRESS WSAEINPROGRESS #define EMSGSIZE WSAEMSGSIZE #define ENOBUFS WSAENOBUFS #define ENOTSOCK WSAENOTSOCK #define ENETUNREACH WSAENETUNREACH #define ENOPROTOOPT WSAENOPROTOOPT #define ENOTCONN WSAENOTCONN #define ENOTSUP 134 /* We undefine the following constants due to conflicts with the w32api headers * and the Windows Platform SDK/DDK. */ #undef interface #undef ERROR_INVALID_PARAMETER #undef ERROR_INSUFFICIENT_BUFFER #undef ERROR_INVALID_DATATYPE #undef FILE_GENERIC_READ #undef FILE_GENERIC_WRITE #undef FILE_GENERIC_EXECUTE #undef FILE_ATTRIBUTE_READONLY #undef FILE_ATTRIBUTE_HIDDEN #undef FILE_ATTRIBUTE_SYSTEM #undef FILE_ATTRIBUTE_DIRECTORY #undef FILE_ATTRIBUTE_ARCHIVE #undef FILE_ATTRIBUTE_DEVICE #undef FILE_ATTRIBUTE_NORMAL #undef FILE_ATTRIBUTE_TEMPORARY #undef FILE_ATTRIBUTE_REPARSE_POINT #undef FILE_ATTRIBUTE_COMPRESSED #undef FILE_ATTRIBUTE_OFFLINE #undef FILE_ATTRIBUTE_ENCRYPTED #undef FILE_FLAG_WRITE_THROUGH #undef FILE_FLAG_NO_BUFFERING #undef FILE_FLAG_RANDOM_ACCESS #undef FILE_FLAG_SEQUENTIAL_SCAN #undef FILE_FLAG_DELETE_ON_CLOSE #undef FILE_FLAG_BACKUP_SEMANTICS #undef FILE_FLAG_POSIX_SEMANTICS #undef FILE_TYPE_DISK #undef FILE_TYPE_UNKNOWN #undef FILE_CASE_SENSITIVE_SEARCH #undef FILE_CASE_PRESERVED_NAMES #undef FILE_UNICODE_ON_DISK #undef FILE_PERSISTENT_ACLS #undef FILE_FILE_COMPRESSION #undef 
FILE_VOLUME_QUOTAS #undef FILE_VOLUME_IS_COMPRESSED #undef FILE_NOTIFY_CHANGE_FILE_NAME #undef FILE_NOTIFY_CHANGE_DIR_NAME #undef FILE_NOTIFY_CHANGE_ATTRIBUTES #undef FILE_NOTIFY_CHANGE_SIZE #undef FILE_NOTIFY_CHANGE_LAST_WRITE #undef FILE_NOTIFY_CHANGE_LAST_ACCESS #undef FILE_NOTIFY_CHANGE_CREATION #undef FILE_NOTIFY_CHANGE_EA #undef FILE_NOTIFY_CHANGE_SECURITY #undef FILE_NOTIFY_CHANGE_STREAM_NAME #undef FILE_NOTIFY_CHANGE_STREAM_SIZE #undef FILE_NOTIFY_CHANGE_STREAM_WRITE #undef FILE_NOTIFY_CHANGE_NAME #undef PRINTER_ATTRIBUTE_QUEUED #undef PRINTER_ATTRIBUTE_DIRECT #undef PRINTER_ATTRIBUTE_DEFAULT #undef PRINTER_ATTRIBUTE_SHARED #undef PRINTER_ATTRIBUTE_NETWORK #undef PRINTER_ATTRIBUTE_HIDDEN #undef PRINTER_ATTRIBUTE_LOCAL #undef PRINTER_ATTRIBUTE_ENABLE_DEVQ #undef PRINTER_ATTRIBUTE_KEEPPRINTEDJOBS #undef PRINTER_ATTRIBUTE_DO_COMPLETE_FIRST #undef PRINTER_ATTRIBUTE_WORK_OFFLINE #undef PRINTER_ATTRIBUTE_ENABLE_BIDI #undef PRINTER_ATTRIBUTE_RAW_ONLY #undef PRINTER_ATTRIBUTE_PUBLISHED #undef PRINTER_ENUM_DEFAULT #undef PRINTER_ENUM_LOCAL #undef PRINTER_ENUM_CONNECTIONS #undef PRINTER_ENUM_FAVORITE #undef PRINTER_ENUM_NAME #undef PRINTER_ENUM_REMOTE #undef PRINTER_ENUM_SHARED #undef PRINTER_ENUM_NETWORK #undef PRINTER_ENUM_EXPAND #undef PRINTER_ENUM_CONTAINER #undef PRINTER_ENUM_ICON1 #undef PRINTER_ENUM_ICON2 #undef PRINTER_ENUM_ICON3 #undef PRINTER_ENUM_ICON4 #undef PRINTER_ENUM_ICON5 #undef PRINTER_ENUM_ICON6 #undef PRINTER_ENUM_ICON7 #undef PRINTER_ENUM_ICON8 #undef PRINTER_STATUS_PAUSED #undef PRINTER_STATUS_ERROR #undef PRINTER_STATUS_PENDING_DELETION #undef PRINTER_STATUS_PAPER_JAM #undef PRINTER_STATUS_PAPER_OUT #undef PRINTER_STATUS_MANUAL_FEED #undef PRINTER_STATUS_PAPER_PROBLEM #undef PRINTER_STATUS_OFFLINE #undef PRINTER_STATUS_IO_ACTIVE #undef PRINTER_STATUS_BUSY #undef PRINTER_STATUS_PRINTING #undef PRINTER_STATUS_OUTPUT_BIN_FULL #undef PRINTER_STATUS_NOT_AVAILABLE #undef PRINTER_STATUS_WAITING #undef PRINTER_STATUS_PROCESSING #undef 
PRINTER_STATUS_INITIALIZING #undef PRINTER_STATUS_WARMING_UP #undef PRINTER_STATUS_TONER_LOW #undef PRINTER_STATUS_NO_TONER #undef PRINTER_STATUS_PAGE_PUNT #undef PRINTER_STATUS_USER_INTERVENTION #undef PRINTER_STATUS_OUT_OF_MEMORY #undef PRINTER_STATUS_DOOR_OPEN #undef PRINTER_STATUS_SERVER_UNKNOWN #undef PRINTER_STATUS_POWER_SAVE #undef DWORD #undef HKEY_CLASSES_ROOT #undef HKEY_CURRENT_USER #undef HKEY_LOCAL_MACHINE #undef HKEY_USERS #undef HKEY_PERFORMANCE_DATA #undef HKEY_CURRENT_CONFIG #undef HKEY_DYN_DATA #undef REG_DWORD #undef REG_QWORD #undef SERVICE_STATE_ALL #undef SE_GROUP_MANDATORY #undef SE_GROUP_ENABLED_BY_DEFAULT #undef SE_GROUP_ENABLED #endif /* _WIN32_REPLACE_H */ ntdb-1.0/lib/replace/wscript000066400000000000000000000724441224151530700161130ustar00rootroot00000000000000#!/usr/bin/env python APPNAME = 'libreplace' VERSION = '1.2.1' blddir = 'bin' import sys, os, Utils # find the buildtools directory srcdir = '.' while not os.path.exists(srcdir+'/buildtools') and len(srcdir.split('/')) < 5: srcdir = '../' + srcdir sys.path.insert(0, srcdir + '/buildtools/wafsamba') import wafsamba, samba_dist import Options, os, preproc samba_dist.DIST_DIRS('lib/replace buildtools:buildtools') def set_options(opt): opt.BUILTIN_DEFAULT('NONE') opt.PRIVATE_EXTENSION_DEFAULT('') opt.RECURSE('buildtools/wafsamba') @wafsamba.runonce def configure(conf): conf.RECURSE('buildtools/wafsamba') conf.env.standalone_replace = conf.IN_LAUNCH_DIR() conf.DEFINE('HAVE_LIBREPLACE', 1) conf.DEFINE('LIBREPLACE_NETWORK_CHECKS', 1) # on Tru64 certain features are only available with _OSF_SOURCE set to 1 # and _XOPEN_SOURCE set to 600 if conf.env['SYSTEM_UNAME_SYSNAME'] == 'OSF1': conf.DEFINE('_OSF_SOURCE', 1, add_to_cflags=True) conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True) # SCM_RIGHTS is only avail if _XOPEN_SOURCE iÑ• defined on IRIX if conf.env['SYSTEM_UNAME_SYSNAME'] == 'IRIX': conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True) conf.DEFINE('_BSD_TYPES', 1, 
add_to_cflags=True) # Try to find the right extra flags for C99 initialisers for f in ["", "-AC99", "-qlanglvl=extc99", "-qlanglvl=stdc99", "-c99"]: if conf.CHECK_CFLAGS([f], ''' struct foo {int x;char y;}; struct foo bar = { .y = 'X', .x = 1 }; '''): if f != "": conf.ADD_CFLAGS(f) break if conf.CHECK_CFLAGS(['-fstack-protector']) and conf.CHECK_LDFLAGS(['-fstack-protector']): conf.ADD_CFLAGS('-fstack-protector') conf.ADD_LDFLAGS('-fstack-protector') # Try to find the right extra flags for -Werror behaviour for f in ["-Werror", # GCC "-errwarn=%all", # Sun Studio "-qhalt=w", # IBM xlc "-w2", # Tru64 ]: if conf.CHECK_CFLAGS([f], ''' '''): if not 'WERROR_CFLAGS' in conf.env: conf.env['WERROR_CFLAGS'] = [] conf.env['WERROR_CFLAGS'].extend([f]) break conf.CHECK_HEADERS('linux/types.h crypt.h locale.h acl/libacl.h compat.h') conf.CHECK_HEADERS('acl/libacl.h attr/xattr.h compat.h ctype.h dustat.h') conf.CHECK_HEADERS('fcntl.h fnmatch.h glob.h history.h krb5.h langinfo.h') conf.CHECK_HEADERS('libaio.h locale.h ndir.h pwd.h') conf.CHECK_HEADERS('shadow.h sys/acl.h') conf.CHECK_HEADERS('sys/attributes.h attr/attributes.h sys/capability.h sys/dir.h sys/epoll.h') conf.CHECK_HEADERS('sys/fcntl.h sys/filio.h sys/filsys.h sys/fs/s5param.h sys/fs/vx/quota.h') conf.CHECK_HEADERS('sys/id.h sys/ioctl.h sys/ipc.h sys/mman.h sys/mode.h sys/ndir.h sys/priv.h') conf.CHECK_HEADERS('sys/resource.h sys/security.h sys/shm.h sys/statfs.h sys/statvfs.h sys/termio.h') conf.CHECK_HEADERS('sys/vfs.h sys/xattr.h termio.h termios.h sys/file.h') conf.CHECK_HEADERS('sys/ucontext.h sys/wait.h sys/stat.h malloc.h grp.h') conf.CHECK_HEADERS('sys/select.h setjmp.h utime.h sys/syslog.h syslog.h') conf.CHECK_HEADERS('stdarg.h vararg.h sys/mount.h mntent.h') conf.CHECK_HEADERS('stropts.h unix.h string.h strings.h sys/param.h limits.h') conf.CHECK_HEADERS('''sys/socket.h netinet/in.h netdb.h arpa/inet.h netinet/in_systm.h netinet/ip.h netinet/tcp.h netinet/in_ip.h sys/sockio.h sys/un.h''', together=True) 
conf.CHECK_HEADERS('sys/uio.h ifaddrs.h direct.h dirent.h') conf.CHECK_HEADERS('windows.h winsock2.h ws2tcpip.h') conf.CHECK_HEADERS('errno.h') conf.CHECK_HEADERS('gcrypt.h getopt.h iconv.h') conf.CHECK_HEADERS('sys/inotify.h memory.h nss.h sasl/sasl.h') conf.CHECK_HEADERS('security/pam_appl.h zlib.h asm/unistd.h') conf.CHECK_HEADERS('aio.h sys/unistd.h rpc/rpc.h rpc/nettype.h alloca.h float.h') conf.CHECK_HEADERS('rpcsvc/nis.h rpcsvc/ypclnt.h sys/sysctl.h') conf.CHECK_HEADERS('sys/fileio.h sys/filesys.h sys/dustat.h sys/sysmacros.h') conf.CHECK_HEADERS('xfs/libxfs.h netgroup.h') conf.CHECK_CODE('', headers='rpc/rpc.h rpcsvc/yp_prot.h', define='HAVE_RPCSVC_YP_PROT_H') conf.CHECK_HEADERS('valgrind.h valgrind/valgrind.h valgrind/memcheck.h') conf.CHECK_HEADERS('nss_common.h nsswitch.h ns_api.h') conf.CHECK_HEADERS('sys/extattr.h sys/ea.h sys/proplist.h sys/cdefs.h') conf.CHECK_HEADERS('utmp.h utmpx.h lastlog.h malloc.h') conf.CHECK_HEADERS('syscall.h sys/syscall.h inttypes.h') # Check for process set name support conf.CHECK_CODE(''' #include int main(void) { prctl(0); return 0; } ''', 'HAVE_PRCTL', headers='sys/prctl.h', msg='Checking for prctl syscall') conf.CHECK_CODE(''' #include #ifdef HAVE_FCNTL_H #include #endif int main(void) { int fd = open("/dev/null", O_DIRECT); } ''', define='HAVE_OPEN_O_DIRECT', addmain=False, msg='Checking for O_DIRECT flag to open(2)') conf.CHECK_TYPES('"long long" intptr_t uintptr_t ptrdiff_t comparison_fn_t') conf.CHECK_TYPE('_Bool', define='HAVE__Bool') conf.CHECK_TYPE('bool', define='HAVE_BOOL') conf.CHECK_TYPE('int8_t', 'char') conf.CHECK_TYPE('uint8_t', 'unsigned char') conf.CHECK_TYPE('int16_t', 'short') conf.CHECK_TYPE('uint16_t', 'unsigned short') conf.CHECK_TYPE('int32_t', 'int') conf.CHECK_TYPE('uint32_t', 'unsigned') conf.CHECK_TYPE('int64_t', 'long long') conf.CHECK_TYPE('uint64_t', 'unsigned long long') conf.CHECK_TYPE('size_t', 'unsigned int') conf.CHECK_TYPE('ssize_t', 'int') conf.CHECK_TYPE('ino_t', 'unsigned') 
conf.CHECK_TYPE('loff_t', 'off_t') conf.CHECK_TYPE('offset_t', 'loff_t') conf.CHECK_TYPE('volatile int', define='HAVE_VOLATILE') conf.CHECK_TYPE('uint_t', 'unsigned int') conf.CHECK_TYPE('blksize_t', 'long', headers='sys/types.h sys/stat.h unistd.h') conf.CHECK_TYPE('blkcnt_t', 'long', headers='sys/types.h sys/stat.h unistd.h') conf.CHECK_SIZEOF('bool char int "long long" long short size_t ssize_t') conf.CHECK_SIZEOF('int8_t uint8_t int16_t uint16_t int32_t uint32_t int64_t uint64_t') conf.CHECK_SIZEOF('void*', define='SIZEOF_VOID_P') conf.CHECK_SIZEOF('off_t dev_t ino_t time_t') conf.CHECK_TYPES('socklen_t', headers='sys/socket.h') conf.CHECK_TYPE_IN('struct ifaddrs', 'ifaddrs.h') conf.CHECK_TYPE_IN('struct addrinfo', 'netdb.h') conf.CHECK_TYPE_IN('struct sockaddr', 'sys/socket.h') conf.CHECK_CODE('struct sockaddr_in6 x', define='HAVE_STRUCT_SOCKADDR_IN6', headers='sys/socket.h netdb.h netinet/in.h') conf.CHECK_TYPE_IN('struct sockaddr_storage', 'sys/socket.h') conf.CHECK_TYPE_IN('sa_family_t', 'sys/socket.h') conf.CHECK_TYPE_IN('sig_atomic_t', 'signal.h', define='HAVE_SIG_ATOMIC_T_TYPE') conf.CHECK_FUNCS_IN('''inet_ntoa inet_aton inet_ntop inet_pton connect gethostbyname getaddrinfo getnameinfo freeaddrinfo gai_strerror socketpair''', 'socket nsl', checklibc=True, headers='sys/socket.h netinet/in.h arpa/inet.h netdb.h') # Some old Linux systems have broken header files and # miss the IPV6_V6ONLY define in netinet/in.h, # but have it in linux/in6.h. 
# We can't include both files so we just check if the value # if defined and do the replacement in system/network.h if not conf.CHECK_VARIABLE('IPV6_V6ONLY', headers='sys/socket.h netdb.h netinet/in.h'): conf.CHECK_CODE(''' #include #if (IPV6_V6ONLY != 26) #error no IPV6_V6ONLY support on linux #endif int main(void) { return IPV6_V6ONLY; } ''', define='HAVE_LINUX_IPV6_V6ONLY_26', addmain=False, msg='Checking for IPV6_V6ONLY in linux/in6.h', local_include=False) conf.CHECK_CODE(''' struct sockaddr_storage sa_store; struct addrinfo *ai = NULL; struct in6_addr in6addr; int idx = if_nametoindex("iface1"); int s = socket(AF_INET6, SOCK_STREAM, 0); int ret = getaddrinfo(NULL, NULL, NULL, &ai); if (ret != 0) { const char *es = gai_strerror(ret); } freeaddrinfo(ai); { int val = 1; #ifdef HAVE_LINUX_IPV6_V6ONLY_26 #define IPV6_V6ONLY 26 #endif ret = setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, (const void *)&val, sizeof(val)); } ''', define='HAVE_IPV6', lib='nsl socket', headers='sys/socket.h netdb.h netinet/in.h') if conf.CONFIG_SET('HAVE_SYS_UCONTEXT_H') and conf.CONFIG_SET('HAVE_SIGNAL_H'): conf.CHECK_CODE(''' ucontext_t uc; sigaddset(&uc.uc_sigmask, SIGUSR1); ''', 'HAVE_UCONTEXT_T', msg="Checking whether we have ucontext_t", headers='signal.h sys/ucontext.h') # these may be builtins, so we need the link=False strategy conf.CHECK_FUNCS('strdup memmem printf memset memcpy memmove strcpy strncpy bzero', link=False) # See https://bugzilla.samba.org/show_bug.cgi?id=1097 # # Ported in from autoconf where it was added with this commit: # commit 804cfb20a067b4b687089dc72a8271b3abf20f31 # Author: Simo Sorce # Date: Wed Aug 25 14:24:16 2004 +0000 # r2070: Let's try to overload srnlen and strndup for AIX where they are natly broken. 
host_os = sys.platform if host_os.rfind('aix') > -1: conf.DEFINE('BROKEN_STRNLEN', 1) conf.DEFINE('BROKEN_STRNDUP', 1) conf.CHECK_FUNCS('shl_load shl_unload shl_findsym') conf.CHECK_FUNCS('pipe strftime srandom random srand rand usleep setbuffer') conf.CHECK_FUNCS('lstat getpgrp utime utimes setuid seteuid setreuid setresuid setgid setegid') conf.CHECK_FUNCS('setregid setresgid chroot strerror vsyslog setlinebuf mktime') conf.CHECK_FUNCS('ftruncate chsize rename waitpid wait4') conf.CHECK_FUNCS('initgroups pread pwrite strndup strcasestr') conf.CHECK_FUNCS('strtok_r mkdtemp dup2 dprintf vdprintf isatty chown lchown') conf.CHECK_FUNCS('link readlink symlink realpath snprintf vsnprintf') conf.CHECK_FUNCS('asprintf vasprintf setenv unsetenv strnlen strtoull __strtoull') conf.CHECK_FUNCS('strtouq strtoll __strtoll strtoq memalign posix_memalign') conf.CHECK_FUNCS('prctl') # libbsd on some platforms provides strlcpy and strlcat if not conf.CHECK_FUNCS('strlcpy strlcat'): conf.CHECK_FUNCS_IN('strlcpy strlcat', 'bsd', headers='bsd/string.h', checklibc=True) if not conf.CHECK_FUNCS('getpeereid'): conf.CHECK_FUNCS_IN('getpeereid', 'bsd', headers='sys/types.h bsd/unistd.h') if not conf.CHECK_FUNCS_IN('setproctitle', 'bsd', headers='sys/types.h bsd/unistd.h'): conf.CHECK_FUNCS_IN('setproctitle', 'setproctitle', headers='setproctitle.h') conf.CHECK_CODE(''' struct ucred cred; socklen_t cred_len; int ret = getsockopt(0, SOL_SOCKET, SO_PEERCRED, &cred, &cred_len);''', 'HAVE_PEERCRED', msg="Checking whether we can use SO_PEERCRED to get socket credentials", headers='sys/types.h sys/socket.h') #Some OS (ie. 
freebsd) return EINVAL if the convertion could not be done, it's not what we expect #Let's detect those cases if conf.CONFIG_SET('HAVE_STRTOLL'): conf.CHECK_CODE(''' long long nb = strtoll("Text", NULL, 0); if (errno == EINVAL) { return 0; } else { return 1; } ''', msg="Checking correct behavior of strtoll", headers = 'errno.h', execute = True, define = 'HAVE_BSD_STRTOLL', ) conf.CHECK_FUNCS('if_nametoindex strerror_r') conf.CHECK_FUNCS('getdirentries getdents syslog') conf.CHECK_FUNCS('gai_strerror get_current_dir_name') conf.CHECK_FUNCS('timegm getifaddrs freeifaddrs mmap setgroups syscall setsid') conf.CHECK_FUNCS('getgrent_r getgrgid_r getgrnam_r getgrouplist getpagesize') conf.CHECK_FUNCS('getpwent_r getpwnam_r getpwuid_r epoll_create') conf.SET_TARGET_TYPE('attr', 'EMPTY') xattr_headers='sys/attributes.h attr/xattr.h sys/xattr.h' conf.CHECK_FUNCS_IN(''' fgetxattr flistea flistxattr fremovexattr fsetxattr getxattr listxattr removexattr setxattr ''', 'attr', checklibc=True, headers=xattr_headers) # We need to check for linux xattrs first, as we do not wish to link to -lattr # (the XFS compat API) on Linux systems with the native xattr API if not conf.CONFIG_SET('HAVE_GETXATTR'): conf.CHECK_FUNCS_IN(''' attr_get attr_getf attr_list attr_listf attropen attr_remove attr_removef attr_set attr_setf extattr_delete_fd extattr_delete_file extattr_get_fd extattr_get_file extattr_list_fd extattr_list_file extattr_set_fd extattr_set_file fgetea fremoveea fsetea getea listea removeea setea ''', 'attr', checklibc=True, headers=xattr_headers) if (conf.CONFIG_SET('HAVE_ATTR_LISTF') or conf.CONFIG_SET('HAVE_EXTATTR_LIST_FD') or conf.CONFIG_SET('HAVE_FLISTEA') or conf.CONFIG_SET('HAVE_FLISTXATTR')): conf.DEFINE('HAVE_XATTR_SUPPORT', 1) # Darwin has extra options to xattr-family functions conf.CHECK_CODE('getxattr(NULL, NULL, NULL, 0, 0, 0)', headers=xattr_headers, local_include=False, define='XATTR_ADDITIONAL_OPTIONS', msg="Checking whether xattr interface takes additional 
options") conf.CHECK_FUNCS_IN('dlopen dlsym dlerror dlclose', 'dl', checklibc=True, headers='dlfcn.h dl.h') conf.CHECK_C_PROTOTYPE('dlopen', 'void *dlopen(const char* filename, unsigned int flags)', define='DLOPEN_TAKES_UNSIGNED_FLAGS', headers='dlfcn.h dl.h') if conf.CHECK_FUNCS_IN('fdatasync', 'rt', checklibc=True): # some systems are missing the declaration conf.CHECK_DECLS('fdatasync') if conf.CHECK_FUNCS_IN('clock_gettime', 'rt', checklibc=True): for c in ['CLOCK_MONOTONIC', 'CLOCK_PROCESS_CPUTIME_ID', 'CLOCK_REALTIME']: conf.CHECK_CODE(''' #if TIME_WITH_SYS_TIME # include # include #else # if HAVE_SYS_TIME_H # include # else # include # endif #endif clockid_t clk = %s''' % c, 'HAVE_%s' % c, msg='Checking whether the clock_gettime clock ID %s is available' % c) conf.CHECK_TYPE('struct timespec', headers='sys/time.h time.h') # these headers need to be tested as a group on freebsd conf.CHECK_HEADERS(headers='sys/socket.h net/if.h', together=True) conf.CHECK_HEADERS(headers='netinet/in.h arpa/nameser.h resolv.h', together=True) conf.CHECK_FUNCS_IN('res_search', 'resolv', checklibc=True, headers='netinet/in.h arpa/nameser.h resolv.h') conf.env.intl_libs='' if not Options.options.disable_gettext: conf.CHECK_HEADERS('libintl.h') conf.CHECK_LIB('intl') # *textdomain functions are not strictly necessary conf.CHECK_FUNCS_IN('bindtextdomain textdomain bind_textdomain_codeset', '', checklibc=True, headers='libintl.h') # gettext and dgettext must exist # on some systems (the ones with glibc, those are in libc) if conf.CHECK_FUNCS_IN('dgettext gettext', '', checklibc=True, headers='libintl.h'): # save for dependency definitions conf.env.intl_libs='' # others (e.g. 
FreeBSD) have seperate libintl elif conf.CHECK_FUNCS_IN('dgettext gettext', 'intl', checklibc=False, headers='libintl.h'): # save for dependency definitions conf.env.intl_libs='intl' # recheck with libintl conf.CHECK_FUNCS_IN('bindtextdomain textdomain bind_textdomain_codeset', 'intl', checklibc=False, headers='libintl.h') else: # Some hosts need lib iconv for linking with lib intl # So we try with flags just in case it helps. oldflags = conf.env['EXTRA_LDFLAGS']; conf.env['EXTRA_LDFLAGS'].extend("-liconv") conf.CHECK_FUNCS_IN('dgettext gettext bindtextdomain textdomain bind_textdomain_codeset', 'intl', checklibc=False, headers='libintl.h') conf.env['EXTRA_LDFLAGS'] = oldflags if conf.env['HAVE_GETTEXT'] and conf.env['HAVE_DGETTEXT']: # save for dependency definitions conf.env.intl_libs='iconv intl' else: conf.fatal('library gettext not found, try specifying the path to ' + 'it with --with-gettext= or ' + '--without-gettext to build without''') conf.CHECK_FUNCS_IN('pthread_create', 'pthread', checklibc=True, headers='pthread.h') conf.CHECK_FUNCS_IN('crypt', 'crypt', checklibc=True) conf.CHECK_VARIABLE('rl_event_hook', define='HAVE_DECL_RL_EVENT_HOOK', always=True, headers='readline.h readline/readline.h readline/history.h') conf.CHECK_DECLS('snprintf vsnprintf asprintf vasprintf') conf.CHECK_DECLS('errno', headers='errno.h', reverse=True) conf.CHECK_DECLS('environ getgrent_r getpwent_r', reverse=True, headers='pwd.h grp.h') conf.CHECK_DECLS('pread pwrite setenv setresgid setresuid', reverse=True) if conf.CONFIG_SET('HAVE_EPOLL_CREATE') and conf.CONFIG_SET('HAVE_SYS_EPOLL_H'): conf.DEFINE('HAVE_EPOLL', 1) conf.CHECK_HEADERS('poll.h') conf.CHECK_FUNCS('poll') conf.CHECK_FUNCS('strptime') conf.CHECK_DECLS('strptime', headers='time.h') conf.CHECK_CODE('''#define LIBREPLACE_CONFIGURE_TEST_STRPTIME #include "test/strptime.c"''', define='HAVE_WORKING_STRPTIME', execute=True, addmain=False, msg='Checking for working strptime') conf.CHECK_CODE('gettimeofday(NULL, NULL)', 
'HAVE_GETTIMEOFDAY_TZ', execute=False) conf.CHECK_CODE('#include "test/snprintf.c"', define="HAVE_C99_VSNPRINTF", execute=True, addmain=False, msg="Checking for C99 vsnprintf") conf.CHECK_CODE('#include "test/shared_mmap.c"', addmain=False, add_headers=False, execute=True, define='HAVE_SHARED_MMAP', msg="Checking for HAVE_SHARED_MMAP") conf.CHECK_CODE('#include "test/shared_mremap.c"', addmain=False, add_headers=False, execute=True, define='HAVE_MREMAP', msg="Checking for HAVE_MREMAP") # OpenBSD (and I've heard HPUX) doesn't sync between mmap and write. # FIXME: Anything other than a 0 or 1 exit code should abort configure! conf.CHECK_CODE('#include "test/incoherent_mmap.c"', addmain=False, add_headers=False, execute=True, define='HAVE_INCOHERENT_MMAP', msg="Checking for HAVE_INCOHERENT_MMAP") conf.SAMBA_BUILD_ENV() conf.CHECK_CODE(''' typedef struct {unsigned x;} FOOBAR; #define X_FOOBAR(x) ((FOOBAR) { x }) #define FOO_ONE X_FOOBAR(1) FOOBAR f = FOO_ONE; static const struct { FOOBAR y; } f2[] = { {FOO_ONE} }; static const FOOBAR f3[] = {FOO_ONE}; ''', define='HAVE_IMMEDIATE_STRUCTURES') conf.CHECK_CODE('mkdir("foo",0777)', define='HAVE_MKDIR_MODE', headers='sys/stat.h') conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_mtim.tv_nsec', define='HAVE_STAT_TV_NSEC', headers='sys/stat.h') # we need the st_rdev test under two names conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_rdev', define='HAVE_STRUCT_STAT_ST_RDEV', headers='sys/stat.h') conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_rdev', define='HAVE_ST_RDEV', headers='sys/stat.h') conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_storage', 'ss_family', headers='sys/socket.h netinet/in.h') conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_storage', '__ss_family', headers='sys/socket.h netinet/in.h') if conf.CHECK_STRUCTURE_MEMBER('struct sockaddr', 'sa_len', headers='sys/socket.h netinet/in.h', define='HAVE_SOCKADDR_SA_LEN'): # the old build system produced both defines conf.DEFINE('HAVE_STRUCT_SOCKADDR_SA_LEN', 1) 
conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_in', 'sin_len', headers='sys/socket.h netinet/in.h', define='HAVE_SOCK_SIN_LEN') conf.CHECK_CODE('struct sockaddr_un sunaddr; sunaddr.sun_family = AF_UNIX;', define='HAVE_UNIXSOCKET', headers='sys/socket.h sys/un.h') conf.CHECK_CODE(''' struct stat st; char tpl[20]="/tmp/test.XXXXXX"; char tpl2[20]="/tmp/test.XXXXXX"; int fd = mkstemp(tpl); int fd2 = mkstemp(tpl2); if (fd == -1) { if (fd2 != -1) { unlink(tpl2); } exit(1); } if (fd2 == -1) exit(1); unlink(tpl); unlink(tpl2); if (fstat(fd, &st) != 0) exit(1); if ((st.st_mode & 0777) != 0600) exit(1); if (strcmp(tpl, "/tmp/test.XXXXXX") == 0) { exit(1); } if (strcmp(tpl, tpl2) == 0) { exit(1); } exit(0); ''', define='HAVE_SECURE_MKSTEMP', execute=True, mandatory=True) # lets see if we get a mandatory failure for this one if conf.CHECK_CFLAGS('-fvisibility=hidden'): conf.env.VISIBILITY_CFLAGS = '-fvisibility=hidden' conf.CHECK_CODE('''void vis_foo1(void) {} __attribute__((visibility("default"))) void vis_foo2(void) {}''', cflags=conf.env.VISIBILITY_CFLAGS, define='HAVE_VISIBILITY_ATTR') # look for a method of finding the list of network interfaces for method in ['HAVE_IFACE_GETIFADDRS', 'HAVE_IFACE_AIX', 'HAVE_IFACE_IFCONF', 'HAVE_IFACE_IFREQ']: if conf.CHECK_CODE(''' #define %s 1 #define NO_CONFIG_H 1 #define AUTOCONF_TEST 1 #define SOCKET_WRAPPER_NOT_REPLACE #include "replace.c" #include "inet_ntop.c" #include "snprintf.c" #include "getifaddrs.c" #define getifaddrs_test main #include "test/getifaddrs.c" ''' % method, method, lib='nsl socket', addmain=False, execute=True): break conf.RECURSE('system') conf.SAMBA_CONFIG_H() REPLACEMENT_FUNCTIONS = { 'replace.c': ['ftruncate', 'strlcpy', 'strlcat', 'mktime', 'initgroups', 'memmove', 'strdup', 'setlinebuf', 'vsyslog', 'strnlen', 'strndup', 'waitpid', 'seteuid', 'setegid', 'chroot', 'mkstemp', 'mkdtemp', 'pread', 'pwrite', 'strcasestr', 'strtok_r', 'strtoll', 'strtoull', 'setenv', 'unsetenv', 'utime', 'utimes', 'dup2', 'chown', 
'link', 'readlink', 'symlink', 'lchown', 'realpath', 'memmem', 'vdprintf', 'dprintf', 'get_current_dir_name', 'strerror_r', 'clock_gettime'], 'timegm.c': ['timegm'], # Note: C99_VSNPRINTF is not a function, but a special condition # for replacement 'snprintf.c': ['C99_VSNPRINTF', 'snprintf', 'vsnprintf', 'asprintf', 'vasprintf'], # Note: WORKING_STRPTIME is not a function, but a special condition # for replacement 'strptime.c': ['WORKING_STRPTIME', 'strptime'], } def build(bld): bld.RECURSE('buildtools/wafsamba') REPLACE_HOSTCC_SOURCE = '' for filename, functions in REPLACEMENT_FUNCTIONS.iteritems(): for function in functions: if not bld.CONFIG_SET('HAVE_%s' % function.upper()): REPLACE_HOSTCC_SOURCE += ' %s' % filename break extra_libs = '' if bld.CONFIG_SET('HAVE_LIBBSD'): extra_libs += ' bsd' bld.SAMBA_SUBSYSTEM('LIBREPLACE_HOSTCC', REPLACE_HOSTCC_SOURCE, use_hostcc=True, use_global_deps=False, cflags='-DSOCKET_WRAPPER_DISABLE=1 -DNSS_WRAPPER_DISABLE=1 -DUID_WRAPPER_DISABLE=1 -D_SAMBA_HOSTCC_', group='compiler_libraries', deps = extra_libs ) REPLACE_SOURCE = REPLACE_HOSTCC_SOURCE if not bld.CONFIG_SET('HAVE_CRYPT'): REPLACE_SOURCE += ' crypt.c' if not bld.CONFIG_SET('HAVE_DLOPEN'): REPLACE_SOURCE += ' dlfcn.c' if not bld.CONFIG_SET('HAVE_POLL'): REPLACE_SOURCE += ' poll.c' if not bld.CONFIG_SET('HAVE_SOCKETPAIR'): REPLACE_SOURCE += ' socketpair.c' if not bld.CONFIG_SET('HAVE_CONNECT'): REPLACE_SOURCE += ' socket.c' if not bld.CONFIG_SET('HAVE_GETIFADDRS'): REPLACE_SOURCE += ' getifaddrs.c' if not bld.CONFIG_SET('HAVE_GETADDRINFO'): REPLACE_SOURCE += ' getaddrinfo.c' if not bld.CONFIG_SET('HAVE_INET_NTOA'): REPLACE_SOURCE += ' inet_ntoa.c' if not bld.CONFIG_SET('HAVE_INET_ATON'): REPLACE_SOURCE += ' inet_aton.c' if not bld.CONFIG_SET('HAVE_INET_NTOP'): REPLACE_SOURCE += ' inet_ntop.c' if not bld.CONFIG_SET('HAVE_INET_PTON'): REPLACE_SOURCE += ' inet_pton.c' if not bld.CONFIG_SET('HAVE_GETXATTR') or bld.CONFIG_SET('XATTR_ADDITIONAL_OPTIONS'): REPLACE_SOURCE += ' 
xattr.c' bld.SAMBA_LIBRARY('replace', source=REPLACE_SOURCE, group='base_libraries', # FIXME: Ideally symbols should be hidden here so they # don't appear in the global namespace when Samba # libraries are loaded, but this doesn't appear to work # at the moment: # hide_symbols=bld.BUILTIN_LIBRARY('replace'), private_library=True, deps='crypt dl nsl socket rt attr' + extra_libs) bld.SAMBA_SUBSYSTEM('replace-test', source='''test/testsuite.c test/strptime.c test/os2_delete.c test/getifaddrs.c''', deps='replace') if bld.env.standalone_replace: bld.SAMBA_BINARY('replace_testsuite', source='test/main.c', deps='replace replace-test', install=False) # build replacements for stdint.h and stdbool.h if needed bld.SAMBA_GENERATOR('replace_stdint_h', rule='cp ${SRC} ${TGT}', source='hdr_replace.h', target='stdint.h', enabled = not bld.CONFIG_SET('HAVE_STDINT_H')) bld.SAMBA_GENERATOR('replace_stdbool_h', rule='cp ${SRC} ${TGT}', source='hdr_replace.h', target='stdbool.h', enabled = not bld.CONFIG_SET('HAVE_STDBOOL_H')) bld.SAMBA_SUBSYSTEM('samba_intl', source='', use_global_deps=False,deps=bld.env.intl_libs) def dist(): '''makes a tarball for distribution''' samba_dist.dist() ntdb-1.0/lib/replace/xattr.c000066400000000000000000000477031224151530700160030ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. replacement routines for xattr implementations Copyright (C) Jeremy Allison 1998-2005 Copyright (C) Timur Bakeyev 2005 Copyright (C) Bjoern Jacke 2006-2007 Copyright (C) Herb Lewis 2003 Copyright (C) Andrew Bartlett 2012 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "system/filesys.h" #include "system/dir.h" /******** Solaris EA helper function prototypes ********/ #ifdef HAVE_ATTROPEN #define SOLARIS_ATTRMODE S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP static int solaris_write_xattr(int attrfd, const char *value, size_t size); static ssize_t solaris_read_xattr(int attrfd, void *value, size_t size); static ssize_t solaris_list_xattr(int attrdirfd, char *list, size_t size); static int solaris_unlinkat(int attrdirfd, const char *name); static int solaris_attropen(const char *path, const char *attrpath, int oflag, mode_t mode); static int solaris_openat(int fildes, const char *path, int oflag, mode_t mode); #endif /************************************************************************** Wrappers for extented attribute calls. Based on the Linux package with support for IRIX and (Net|Free)BSD also. Expand as other systems have them. ****************************************************************************/ ssize_t rep_getxattr (const char *path, const char *name, void *value, size_t size) { #if defined(HAVE_GETXATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return getxattr(path, name, value, size); #else /* So that we do not recursivly call this function */ #undef getxattr int options = 0; return getxattr(path, name, value, size, 0, options); #endif #elif defined(HAVE_GETEA) return getea(path, name, value, size); #elif defined(HAVE_EXTATTR_GET_FILE) char *s; ssize_t retval; int attrnamespace = (strncmp(name, "system", 6) == 0) ? EXTATTR_NAMESPACE_SYSTEM : EXTATTR_NAMESPACE_USER; const char *attrname = ((s=strchr(name, '.')) == NULL) ? 
name : s + 1; /* * The BSD implementation has a nasty habit of silently truncating * the returned value to the size of the buffer, so we have to check * that the buffer is large enough to fit the returned value. */ if((retval=extattr_get_file(path, attrnamespace, attrname, NULL, 0)) >= 0) { if (size == 0) { return retval; } else if (retval > size) { errno = ERANGE; return -1; } if((retval=extattr_get_file(path, attrnamespace, attrname, value, size)) >= 0) return retval; } return -1; #elif defined(HAVE_ATTR_GET) int retval, flags = 0; int valuelength = (int)size; char *attrname = strchr(name,'.') + 1; if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT; retval = attr_get(path, attrname, (char *)value, &valuelength, flags); if (size == 0 && retval == -1 && errno == E2BIG) { return valuelength; } return retval ? retval : valuelength; #elif defined(HAVE_ATTROPEN) ssize_t ret = -1; int attrfd = solaris_attropen(path, name, O_RDONLY, 0); if (attrfd >= 0) { ret = solaris_read_xattr(attrfd, value, size); close(attrfd); } return ret; #else errno = ENOSYS; return -1; #endif } ssize_t rep_fgetxattr (int filedes, const char *name, void *value, size_t size) { #if defined(HAVE_FGETXATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return fgetxattr(filedes, name, value, size); #else /* So that we do not recursivly call this function */ #undef fgetxattr int options = 0; return fgetxattr(filedes, name, value, size, 0, options); #endif #elif defined(HAVE_FGETEA) return fgetea(filedes, name, value, size); #elif defined(HAVE_EXTATTR_GET_FD) char *s; ssize_t retval; int attrnamespace = (strncmp(name, "system", 6) == 0) ? EXTATTR_NAMESPACE_SYSTEM : EXTATTR_NAMESPACE_USER; const char *attrname = ((s=strchr(name, '.')) == NULL) ? 
name : s + 1; if((retval=extattr_get_fd(filedes, attrnamespace, attrname, NULL, 0)) >= 0) { if (size == 0) { return retval; } else if (retval > size) { errno = ERANGE; return -1; } if((retval=extattr_get_fd(filedes, attrnamespace, attrname, value, size)) >= 0) return retval; } return -1; #elif defined(HAVE_ATTR_GETF) int retval, flags = 0; int valuelength = (int)size; char *attrname = strchr(name,'.') + 1; if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT; retval = attr_getf(filedes, attrname, (char *)value, &valuelength, flags); if (size == 0 && retval == -1 && errno == E2BIG) { return valuelength; } return retval ? retval : valuelength; #elif defined(HAVE_ATTROPEN) ssize_t ret = -1; int attrfd = solaris_openat(filedes, name, O_RDONLY|O_XATTR, 0); if (attrfd >= 0) { ret = solaris_read_xattr(attrfd, value, size); close(attrfd); } return ret; #else errno = ENOSYS; return -1; #endif } #if defined(HAVE_EXTATTR_LIST_FILE) #define EXTATTR_PREFIX(s) (s), (sizeof((s))-1) static struct { int space; const char *name; size_t len; } extattr[] = { { EXTATTR_NAMESPACE_SYSTEM, EXTATTR_PREFIX("system.") }, { EXTATTR_NAMESPACE_USER, EXTATTR_PREFIX("user.") }, }; typedef union { const char *path; int filedes; } extattr_arg; static ssize_t bsd_attr_list (int type, extattr_arg arg, char *list, size_t size) { ssize_t list_size, total_size = 0; int i, t, len; char *buf; /* Iterate through extattr(2) namespaces */ for(t = 0; t < ARRAY_SIZE(extattr); t++) { if (t != EXTATTR_NAMESPACE_USER && geteuid() != 0) { /* ignore all but user namespace when we are not root, see bug 10247 */ continue; } switch(type) { #if defined(HAVE_EXTATTR_LIST_FILE) case 0: list_size = extattr_list_file(arg.path, extattr[t].space, list, size); break; #endif #if defined(HAVE_EXTATTR_LIST_LINK) case 1: list_size = extattr_list_link(arg.path, extattr[t].space, list, size); break; #endif #if defined(HAVE_EXTATTR_LIST_FD) case 2: list_size = extattr_list_fd(arg.filedes, extattr[t].space, list, size); break; 
#endif default: errno = ENOSYS; return -1; } /* Some error happend. Errno should be set by the previous call */ if(list_size < 0) return -1; /* No attributes */ if(list_size == 0) continue; /* XXX: Call with an empty buffer may be used to calculate necessary buffer size. Unfortunately, we can't say, how many attributes were returned, so here is the potential problem with the emulation. */ if(list == NULL) { /* Take the worse case of one char attribute names - two bytes per name plus one more for sanity. */ total_size += list_size + (list_size/2 + 1)*extattr[t].len; continue; } /* Count necessary offset to fit namespace prefixes */ len = 0; for(i = 0; i < list_size; i += list[i] + 1) len += extattr[t].len; total_size += list_size + len; /* Buffer is too small to fit the results */ if(total_size > size) { errno = ERANGE; return -1; } /* Shift results back, so we can prepend prefixes */ buf = (char *)memmove(list + len, list, list_size); for(i = 0; i < list_size; i += len + 1) { len = buf[i]; strncpy(list, extattr[t].name, extattr[t].len + 1); list += extattr[t].len; strncpy(list, buf + i + 1, len); list[len] = '\0'; list += len + 1; } size -= total_size; } return total_size; } #endif #if defined(HAVE_ATTR_LIST) && (defined(HAVE_SYS_ATTRIBUTES_H) || defined(HAVE_ATTR_ATTRIBUTES_H)) static char attr_buffer[ATTR_MAX_VALUELEN]; static ssize_t irix_attr_list(const char *path, int filedes, char *list, size_t size, int flags) { int retval = 0, index; attrlist_cursor_t *cursor = 0; int total_size = 0; attrlist_t * al = (attrlist_t *)attr_buffer; attrlist_ent_t *ae; size_t ent_size, left = size; char *bp = list; while (true) { if (filedes) retval = attr_listf(filedes, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor); else retval = attr_list(path, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor); if (retval) break; for (index = 0; index < al->al_count; index++) { ae = ATTR_ENTRY(attr_buffer, index); ent_size = strlen(ae->a_name) + sizeof("user."); if (left >= ent_size) { 
strncpy(bp, "user.", sizeof("user.")); strncat(bp, ae->a_name, ent_size - sizeof("user.")); bp += ent_size; left -= ent_size; } else if (size) { errno = ERANGE; retval = -1; break; } total_size += ent_size; } if (al->al_more == 0) break; } if (retval == 0) { flags |= ATTR_ROOT; cursor = 0; while (true) { if (filedes) retval = attr_listf(filedes, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor); else retval = attr_list(path, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor); if (retval) break; for (index = 0; index < al->al_count; index++) { ae = ATTR_ENTRY(attr_buffer, index); ent_size = strlen(ae->a_name) + sizeof("system."); if (left >= ent_size) { strncpy(bp, "system.", sizeof("system.")); strncat(bp, ae->a_name, ent_size - sizeof("system.")); bp += ent_size; left -= ent_size; } else if (size) { errno = ERANGE; retval = -1; break; } total_size += ent_size; } if (al->al_more == 0) break; } } return (ssize_t)(retval ? retval : total_size); } #endif ssize_t rep_listxattr (const char *path, char *list, size_t size) { #if defined(HAVE_LISTXATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return listxattr(path, list, size); #else /* So that we do not recursivly call this function */ #undef listxattr int options = 0; return listxattr(path, list, size, options); #endif #elif defined(HAVE_LISTEA) return listea(path, list, size); #elif defined(HAVE_EXTATTR_LIST_FILE) extattr_arg arg; arg.path = path; return bsd_attr_list(0, arg, list, size); #elif defined(HAVE_ATTR_LIST) && defined(HAVE_SYS_ATTRIBUTES_H) return irix_attr_list(path, 0, list, size, 0); #elif defined(HAVE_ATTROPEN) ssize_t ret = -1; int attrdirfd = solaris_attropen(path, ".", O_RDONLY, 0); if (attrdirfd >= 0) { ret = solaris_list_xattr(attrdirfd, list, size); close(attrdirfd); } return ret; #else errno = ENOSYS; return -1; #endif } ssize_t rep_flistxattr (int filedes, char *list, size_t size) { #if defined(HAVE_FLISTXATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return flistxattr(filedes, list, size); #else /* So that we do not 
recursivly call this function */ #undef flistxattr int options = 0; return flistxattr(filedes, list, size, options); #endif #elif defined(HAVE_FLISTEA) return flistea(filedes, list, size); #elif defined(HAVE_EXTATTR_LIST_FD) extattr_arg arg; arg.filedes = filedes; return bsd_attr_list(2, arg, list, size); #elif defined(HAVE_ATTR_LISTF) return irix_attr_list(NULL, filedes, list, size, 0); #elif defined(HAVE_ATTROPEN) ssize_t ret = -1; int attrdirfd = solaris_openat(filedes, ".", O_RDONLY|O_XATTR, 0); if (attrdirfd >= 0) { ret = solaris_list_xattr(attrdirfd, list, size); close(attrdirfd); } return ret; #else errno = ENOSYS; return -1; #endif } int rep_removexattr (const char *path, const char *name) { #if defined(HAVE_REMOVEXATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return removexattr(path, name); #else /* So that we do not recursivly call this function */ #undef removexattr int options = 0; return removexattr(path, name, options); #endif #elif defined(HAVE_REMOVEEA) return removeea(path, name); #elif defined(HAVE_EXTATTR_DELETE_FILE) char *s; int attrnamespace = (strncmp(name, "system", 6) == 0) ? EXTATTR_NAMESPACE_SYSTEM : EXTATTR_NAMESPACE_USER; const char *attrname = ((s=strchr(name, '.')) == NULL) ? 
name : s + 1; return extattr_delete_file(path, attrnamespace, attrname); #elif defined(HAVE_ATTR_REMOVE) int flags = 0; char *attrname = strchr(name,'.') + 1; if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT; return attr_remove(path, attrname, flags); #elif defined(HAVE_ATTROPEN) int ret = -1; int attrdirfd = solaris_attropen(path, ".", O_RDONLY, 0); if (attrdirfd >= 0) { ret = solaris_unlinkat(attrdirfd, name); close(attrdirfd); } return ret; #else errno = ENOSYS; return -1; #endif } int rep_fremovexattr (int filedes, const char *name) { #if defined(HAVE_FREMOVEXATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return fremovexattr(filedes, name); #else /* So that we do not recursivly call this function */ #undef fremovexattr int options = 0; return fremovexattr(filedes, name, options); #endif #elif defined(HAVE_FREMOVEEA) return fremoveea(filedes, name); #elif defined(HAVE_EXTATTR_DELETE_FD) char *s; int attrnamespace = (strncmp(name, "system", 6) == 0) ? EXTATTR_NAMESPACE_SYSTEM : EXTATTR_NAMESPACE_USER; const char *attrname = ((s=strchr(name, '.')) == NULL) ? 
name : s + 1; return extattr_delete_fd(filedes, attrnamespace, attrname); #elif defined(HAVE_ATTR_REMOVEF) int flags = 0; char *attrname = strchr(name,'.') + 1; if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT; return attr_removef(filedes, attrname, flags); #elif defined(HAVE_ATTROPEN) int ret = -1; int attrdirfd = solaris_openat(filedes, ".", O_RDONLY|O_XATTR, 0); if (attrdirfd >= 0) { ret = solaris_unlinkat(attrdirfd, name); close(attrdirfd); } return ret; #else errno = ENOSYS; return -1; #endif } int rep_setxattr (const char *path, const char *name, const void *value, size_t size, int flags) { #if defined(HAVE_SETXATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return setxattr(path, name, value, size, flags); #else /* So that we do not recursivly call this function */ #undef setxattr int options = 0; return setxattr(path, name, value, size, 0, options); #endif #elif defined(HAVE_SETEA) return setea(path, name, value, size, flags); #elif defined(HAVE_EXTATTR_SET_FILE) char *s; int retval = 0; int attrnamespace = (strncmp(name, "system", 6) == 0) ? EXTATTR_NAMESPACE_SYSTEM : EXTATTR_NAMESPACE_USER; const char *attrname = ((s=strchr(name, '.')) == NULL) ? name : s + 1; if (flags) { /* Check attribute existence */ retval = extattr_get_file(path, attrnamespace, attrname, NULL, 0); if (retval < 0) { /* REPLACE attribute, that doesn't exist */ if (flags & XATTR_REPLACE && errno == ENOATTR) { errno = ENOATTR; return -1; } /* Ignore other errors */ } else { /* CREATE attribute, that already exists */ if (flags & XATTR_CREATE) { errno = EEXIST; return -1; } } } retval = extattr_set_file(path, attrnamespace, attrname, value, size); return (retval < 0) ? 
-1 : 0; #elif defined(HAVE_ATTR_SET) int myflags = 0; char *attrname = strchr(name,'.') + 1; if (strncmp(name, "system", 6) == 0) myflags |= ATTR_ROOT; if (flags & XATTR_CREATE) myflags |= ATTR_CREATE; if (flags & XATTR_REPLACE) myflags |= ATTR_REPLACE; return attr_set(path, attrname, (const char *)value, size, myflags); #elif defined(HAVE_ATTROPEN) int ret = -1; int myflags = O_RDWR; int attrfd; if (flags & XATTR_CREATE) myflags |= O_EXCL; if (!(flags & XATTR_REPLACE)) myflags |= O_CREAT; attrfd = solaris_attropen(path, name, myflags, (mode_t) SOLARIS_ATTRMODE); if (attrfd >= 0) { ret = solaris_write_xattr(attrfd, value, size); close(attrfd); } return ret; #else errno = ENOSYS; return -1; #endif } int rep_fsetxattr (int filedes, const char *name, const void *value, size_t size, int flags) { #if defined(HAVE_FSETXATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return fsetxattr(filedes, name, value, size, flags); #else /* So that we do not recursivly call this function */ #undef fsetxattr int options = 0; return fsetxattr(filedes, name, value, size, 0, options); #endif #elif defined(HAVE_FSETEA) return fsetea(filedes, name, value, size, flags); #elif defined(HAVE_EXTATTR_SET_FD) char *s; int retval = 0; int attrnamespace = (strncmp(name, "system", 6) == 0) ? EXTATTR_NAMESPACE_SYSTEM : EXTATTR_NAMESPACE_USER; const char *attrname = ((s=strchr(name, '.')) == NULL) ? name : s + 1; if (flags) { /* Check attribute existence */ retval = extattr_get_fd(filedes, attrnamespace, attrname, NULL, 0); if (retval < 0) { /* REPLACE attribute, that doesn't exist */ if (flags & XATTR_REPLACE && errno == ENOATTR) { errno = ENOATTR; return -1; } /* Ignore other errors */ } else { /* CREATE attribute, that already exists */ if (flags & XATTR_CREATE) { errno = EEXIST; return -1; } } } retval = extattr_set_fd(filedes, attrnamespace, attrname, value, size); return (retval < 0) ? 
-1 : 0; #elif defined(HAVE_ATTR_SETF) int myflags = 0; char *attrname = strchr(name,'.') + 1; if (strncmp(name, "system", 6) == 0) myflags |= ATTR_ROOT; if (flags & XATTR_CREATE) myflags |= ATTR_CREATE; if (flags & XATTR_REPLACE) myflags |= ATTR_REPLACE; return attr_setf(filedes, attrname, (const char *)value, size, myflags); #elif defined(HAVE_ATTROPEN) int ret = -1; int myflags = O_RDWR | O_XATTR; int attrfd; if (flags & XATTR_CREATE) myflags |= O_EXCL; if (!(flags & XATTR_REPLACE)) myflags |= O_CREAT; attrfd = solaris_openat(filedes, name, myflags, (mode_t) SOLARIS_ATTRMODE); if (attrfd >= 0) { ret = solaris_write_xattr(attrfd, value, size); close(attrfd); } return ret; #else errno = ENOSYS; return -1; #endif } /************************************************************************** helper functions for Solaris' EA support ****************************************************************************/ #ifdef HAVE_ATTROPEN static ssize_t solaris_read_xattr(int attrfd, void *value, size_t size) { struct stat sbuf; if (fstat(attrfd, &sbuf) == -1) { errno = ENOATTR; return -1; } /* This is to return the current size of the named extended attribute */ if (size == 0) { return sbuf.st_size; } /* check size and read xattr */ if (sbuf.st_size > size) { errno = ERANGE; return -1; } return read(attrfd, value, sbuf.st_size); } static ssize_t solaris_list_xattr(int attrdirfd, char *list, size_t size) { ssize_t len = 0; DIR *dirp; struct dirent *de; int newfd = dup(attrdirfd); /* CAUTION: The originating file descriptor should not be used again following the call to fdopendir(). For that reason we dup() the file descriptor here to make things more clear. */ dirp = fdopendir(newfd); while ((de = readdir(dirp))) { size_t listlen = strlen(de->d_name) + 1; if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, "..")) { /* we don't want "." and ".." 
here: */ continue; } if (size == 0) { /* return the current size of the list of extended attribute names*/ len += listlen; } else { /* check size and copy entrieÑ• + nul into list. */ if ((len + listlen) > size) { errno = ERANGE; len = -1; break; } else { strlcpy(list + len, de->d_name, listlen); len += listlen; } } } if (closedir(dirp) == -1) { return -1; } return len; } static int solaris_unlinkat(int attrdirfd, const char *name) { if (unlinkat(attrdirfd, name, 0) == -1) { if (errno == ENOENT) { errno = ENOATTR; } return -1; } return 0; } static int solaris_attropen(const char *path, const char *attrpath, int oflag, mode_t mode) { int filedes = attropen(path, attrpath, oflag, mode); if (filedes == -1) { if (errno == EINVAL) { errno = ENOTSUP; } else { errno = ENOATTR; } } return filedes; } static int solaris_openat(int fildes, const char *path, int oflag, mode_t mode) { int filedes = openat(fildes, path, oflag, mode); if (filedes == -1) { if (errno == EINVAL) { errno = ENOTSUP; } else { errno = ENOATTR; } } return filedes; } static int solaris_write_xattr(int attrfd, const char *value, size_t size) { if ((ftruncate(attrfd, 0) == 0) && (write(attrfd, value, size) == size)) { return 0; } else { return -1; } } #endif /*HAVE_ATTROPEN*/ ntdb-1.0/lock.c000066400000000000000000000564501224151530700134070ustar00rootroot00000000000000 /* Unix SMB/CIFS implementation. trivial database library Copyright (C) Andrew Tridgell 1999-2005 Copyright (C) Paul `Rusty' Russell 2000 Copyright (C) Jeremy Allison 2000-2003 ** NOTE! The following LGPL license applies to the ntdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "private.h" #include /* If we were threaded, we could wait for unlock, but we're not, so fail. */ enum NTDB_ERROR owner_conflict(struct ntdb_context *ntdb, const char *call) { return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_USE_ERROR, "%s: lock owned by another ntdb in this process.", call); } /* If we fork, we no longer really own locks. */ bool check_lock_pid(struct ntdb_context *ntdb, const char *call, bool log) { /* No locks? No problem! */ if (ntdb->file->allrecord_lock.count == 0 && ntdb->file->num_lockrecs == 0) { return true; } /* No fork? No problem! */ if (ntdb->file->locker == getpid()) { return true; } if (log) { ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_USE_ERROR, "%s: fork() detected after lock acquisition!" 
" (%u vs %u)", call, (unsigned int)ntdb->file->locker, (unsigned int)getpid()); } return false; } int ntdb_fcntl_lock(int fd, int rw, off_t off, off_t len, bool waitflag, void *unused) { struct flock fl; int ret; do { fl.l_type = rw; fl.l_whence = SEEK_SET; fl.l_start = off; fl.l_len = len; if (waitflag) ret = fcntl(fd, F_SETLKW, &fl); else ret = fcntl(fd, F_SETLK, &fl); } while (ret != 0 && errno == EINTR); return ret; } int ntdb_fcntl_unlock(int fd, int rw, off_t off, off_t len, void *unused) { struct flock fl; int ret; do { fl.l_type = F_UNLCK; fl.l_whence = SEEK_SET; fl.l_start = off; fl.l_len = len; ret = fcntl(fd, F_SETLKW, &fl); } while (ret != 0 && errno == EINTR); return ret; } static int lock(struct ntdb_context *ntdb, int rw, off_t off, off_t len, bool waitflag) { int ret; if (ntdb->file->allrecord_lock.count == 0 && ntdb->file->num_lockrecs == 0) { ntdb->file->locker = getpid(); } ntdb->stats.lock_lowlevel++; ret = ntdb->lock_fn(ntdb->file->fd, rw, off, len, waitflag, ntdb->lock_data); if (!waitflag) { ntdb->stats.lock_nonblock++; if (ret != 0) ntdb->stats.lock_nonblock_fail++; } return ret; } static int unlock(struct ntdb_context *ntdb, int rw, off_t off, off_t len) { #if 0 /* Check they matched up locks and unlocks correctly. */ char line[80]; FILE *locks; bool found = false; locks = fopen("/proc/locks", "r"); while (fgets(line, 80, locks)) { char *p; int type, start, l; /* eg. 
1: FLOCK ADVISORY WRITE 2440 08:01:2180826 0 EOF */ p = strchr(line, ':') + 1; if (strncmp(p, " POSIX ADVISORY ", strlen(" POSIX ADVISORY "))) continue; p += strlen(" FLOCK ADVISORY "); if (strncmp(p, "READ ", strlen("READ ")) == 0) type = F_RDLCK; else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0) type = F_WRLCK; else abort(); p += 6; if (atoi(p) != getpid()) continue; p = strchr(strchr(p, ' ') + 1, ' ') + 1; start = atoi(p); p = strchr(p, ' ') + 1; if (strncmp(p, "EOF", 3) == 0) l = 0; else l = atoi(p) - start + 1; if (off == start) { if (len != l) { fprintf(stderr, "Len %u should be %u: %s", (int)len, l, line); abort(); } if (type != rw) { fprintf(stderr, "Type %s wrong: %s", rw == F_RDLCK ? "READ" : "WRITE", line); abort(); } found = true; break; } } if (!found) { fprintf(stderr, "Unlock on %u@%u not found!", (int)off, (int)len); abort(); } fclose(locks); #endif return ntdb->unlock_fn(ntdb->file->fd, rw, off, len, ntdb->lock_data); } /* a byte range locking function - return 0 on success this functions locks len bytes at the specified offset. note that a len of zero means lock to end of file */ static enum NTDB_ERROR ntdb_brlock(struct ntdb_context *ntdb, int rw_type, ntdb_off_t offset, ntdb_off_t len, enum ntdb_lock_flags flags) { int ret; if (rw_type == F_WRLCK && (ntdb->flags & NTDB_RDONLY)) { return ntdb_logerr(ntdb, NTDB_ERR_RDONLY, NTDB_LOG_USE_ERROR, "Write lock attempted on read-only database"); } if (ntdb->flags & NTDB_NOLOCK) { return NTDB_SUCCESS; } /* A 32 bit system cannot open a 64-bit file, but it could have * expanded since then: check here. */ if ((size_t)(offset + len) != offset + len) { return ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_brlock: lock on giant offset %llu", (long long)(offset + len)); } ret = lock(ntdb, rw_type, offset, len, flags & NTDB_LOCK_WAIT); if (ret != 0) { /* Generic lock error. errno set by fcntl. * EAGAIN is an expected return from non-blocking * locks. 
*/ if (!(flags & NTDB_LOCK_PROBE) && (errno != EAGAIN && errno != EINTR)) { ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_brlock failed (fd=%d) at" " offset %zu rw_type=%d flags=%d len=%zu:" " %s", ntdb->file->fd, (size_t)offset, rw_type, flags, (size_t)len, strerror(errno)); } return NTDB_ERR_LOCK; } return NTDB_SUCCESS; } static enum NTDB_ERROR ntdb_brunlock(struct ntdb_context *ntdb, int rw_type, ntdb_off_t offset, size_t len) { if (ntdb->flags & NTDB_NOLOCK) { return NTDB_SUCCESS; } if (!check_lock_pid(ntdb, "ntdb_brunlock", false)) return NTDB_ERR_LOCK; if (unlock(ntdb, rw_type, offset, len) == -1) { return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_brunlock failed (fd=%d) at offset %zu" " rw_type=%d len=%zu: %s", ntdb->file->fd, (size_t)offset, rw_type, (size_t)len, strerror(errno)); } return NTDB_SUCCESS; } /* upgrade a read lock to a write lock. This needs to be handled in a special way as some OSes (such as solaris) have too conservative deadlock detection and claim a deadlock when progress can be made. For those OSes we may loop for a while. 
*/ enum NTDB_ERROR ntdb_allrecord_upgrade(struct ntdb_context *ntdb, off_t start) { int count = 1000; if (!check_lock_pid(ntdb, "ntdb_transaction_prepare_commit", true)) return NTDB_ERR_LOCK; if (ntdb->file->allrecord_lock.count != 1) { return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_allrecord_upgrade failed:" " count %u too high", ntdb->file->allrecord_lock.count); } if (ntdb->file->allrecord_lock.off != 1) { return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_allrecord_upgrade failed:" " already upgraded?"); } if (ntdb->file->allrecord_lock.owner != ntdb) { return owner_conflict(ntdb, "ntdb_allrecord_upgrade"); } while (count--) { struct timeval tv; if (ntdb_brlock(ntdb, F_WRLCK, start, 0, NTDB_LOCK_WAIT|NTDB_LOCK_PROBE) == NTDB_SUCCESS) { ntdb->file->allrecord_lock.ltype = F_WRLCK; ntdb->file->allrecord_lock.off = 0; return NTDB_SUCCESS; } if (errno != EDEADLK) { break; } /* sleep for as short a time as we can - more portable than usleep() */ tv.tv_sec = 0; tv.tv_usec = 1; select(0, NULL, NULL, NULL, &tv); } if (errno != EAGAIN && errno != EINTR) ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_allrecord_upgrade failed"); return NTDB_ERR_LOCK; } static struct ntdb_lock *find_nestlock(struct ntdb_context *ntdb, ntdb_off_t offset, const struct ntdb_context *owner) { unsigned int i; for (i=0; ifile->num_lockrecs; i++) { if (ntdb->file->lockrecs[i].off == offset) { if (owner && ntdb->file->lockrecs[i].owner != owner) return NULL; return &ntdb->file->lockrecs[i]; } } return NULL; } enum NTDB_ERROR ntdb_lock_and_recover(struct ntdb_context *ntdb) { enum NTDB_ERROR ecode; if (!check_lock_pid(ntdb, "ntdb_transaction_prepare_commit", true)) return NTDB_ERR_LOCK; ecode = ntdb_allrecord_lock(ntdb, F_WRLCK, NTDB_LOCK_WAIT|NTDB_LOCK_NOCHECK, false); if (ecode != NTDB_SUCCESS) { return ecode; } ecode = ntdb_lock_open(ntdb, F_WRLCK, NTDB_LOCK_WAIT|NTDB_LOCK_NOCHECK); if (ecode != NTDB_SUCCESS) { ntdb_allrecord_unlock(ntdb, F_WRLCK); return ecode; 
} ecode = ntdb_transaction_recover(ntdb); ntdb_unlock_open(ntdb, F_WRLCK); ntdb_allrecord_unlock(ntdb, F_WRLCK); return ecode; } /* lock an offset in the database. */ static enum NTDB_ERROR ntdb_nest_lock(struct ntdb_context *ntdb, ntdb_off_t offset, int ltype, enum ntdb_lock_flags flags) { struct ntdb_lock *new_lck; enum NTDB_ERROR ecode; assert(offset <= (NTDB_HASH_LOCK_START + (1 << ntdb->hash_bits) + ntdb->file->map_size / 8)); if (ntdb->flags & NTDB_NOLOCK) return NTDB_SUCCESS; if (!check_lock_pid(ntdb, "ntdb_nest_lock", true)) { return NTDB_ERR_LOCK; } ntdb->stats.locks++; new_lck = find_nestlock(ntdb, offset, NULL); if (new_lck) { if (new_lck->owner != ntdb) { return owner_conflict(ntdb, "ntdb_nest_lock"); } if (new_lck->ltype == F_RDLCK && ltype == F_WRLCK) { return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_nest_lock:" " offset %zu has read lock", (size_t)offset); } /* Just increment the struct, posix locks don't stack. */ new_lck->count++; return NTDB_SUCCESS; } #if 0 if (ntdb->file->num_lockrecs && offset >= NTDB_HASH_LOCK_START && offset < NTDB_HASH_LOCK_START + NTDB_HASH_LOCK_RANGE) { return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_nest_lock: already have a hash lock?"); } #endif if (ntdb->file->lockrecs == NULL) { new_lck = ntdb->alloc_fn(ntdb->file, sizeof(*ntdb->file->lockrecs), ntdb->alloc_data); } else { new_lck = (struct ntdb_lock *)ntdb->expand_fn( ntdb->file->lockrecs, sizeof(*ntdb->file->lockrecs) * (ntdb->file->num_lockrecs+1), ntdb->alloc_data); } if (new_lck == NULL) { return ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "ntdb_nest_lock:" " unable to allocate %zu lock struct", ntdb->file->num_lockrecs + 1); } ntdb->file->lockrecs = new_lck; /* Since fcntl locks don't nest, we do a lock for the first one, and simply bump the count for future ones */ ecode = ntdb_brlock(ntdb, ltype, offset, 1, flags); if (ecode != NTDB_SUCCESS) { return ecode; } /* First time we grab a lock, perhaps someone died in commit? 
*/ if (!(flags & NTDB_LOCK_NOCHECK) && ntdb->file->num_lockrecs == 0) { ntdb_bool_err berr = ntdb_needs_recovery(ntdb); if (berr != false) { ntdb_brunlock(ntdb, ltype, offset, 1); if (berr < 0) return NTDB_OFF_TO_ERR(berr); ecode = ntdb_lock_and_recover(ntdb); if (ecode == NTDB_SUCCESS) { ecode = ntdb_brlock(ntdb, ltype, offset, 1, flags); } if (ecode != NTDB_SUCCESS) { return ecode; } } } ntdb->file->lockrecs[ntdb->file->num_lockrecs].owner = ntdb; ntdb->file->lockrecs[ntdb->file->num_lockrecs].off = offset; ntdb->file->lockrecs[ntdb->file->num_lockrecs].count = 1; ntdb->file->lockrecs[ntdb->file->num_lockrecs].ltype = ltype; ntdb->file->num_lockrecs++; return NTDB_SUCCESS; } static enum NTDB_ERROR ntdb_nest_unlock(struct ntdb_context *ntdb, ntdb_off_t off, int ltype) { struct ntdb_lock *lck; enum NTDB_ERROR ecode; if (ntdb->flags & NTDB_NOLOCK) return NTDB_SUCCESS; lck = find_nestlock(ntdb, off, ntdb); if ((lck == NULL) || (lck->count == 0)) { return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_nest_unlock: no lock for %zu", (size_t)off); } if (lck->count > 1) { lck->count--; return NTDB_SUCCESS; } /* * This lock has count==1 left, so we need to unlock it in the * kernel. We don't bother with decrementing the in-memory array * element, we're about to overwrite it with the last array element * anyway. */ ecode = ntdb_brunlock(ntdb, ltype, off, 1); /* * Shrink the array by overwriting the element just unlocked with the * last array element. 
*/ *lck = ntdb->file->lockrecs[--ntdb->file->num_lockrecs]; return ecode; } /* get the transaction lock */ enum NTDB_ERROR ntdb_transaction_lock(struct ntdb_context *ntdb, int ltype) { return ntdb_nest_lock(ntdb, NTDB_TRANSACTION_LOCK, ltype, NTDB_LOCK_WAIT); } /* release the transaction lock */ void ntdb_transaction_unlock(struct ntdb_context *ntdb, int ltype) { ntdb_nest_unlock(ntdb, NTDB_TRANSACTION_LOCK, ltype); } /* We only need to lock individual bytes, but Linux merges consecutive locks * so we lock in contiguous ranges. */ static enum NTDB_ERROR ntdb_lock_gradual(struct ntdb_context *ntdb, int ltype, enum ntdb_lock_flags flags, ntdb_off_t off, ntdb_off_t len) { enum NTDB_ERROR ecode; enum ntdb_lock_flags nb_flags = (flags & ~NTDB_LOCK_WAIT); if (len <= 1) { /* 0 would mean to end-of-file... */ assert(len != 0); /* Single hash. Just do blocking lock. */ return ntdb_brlock(ntdb, ltype, off, len, flags); } /* First we try non-blocking. */ ecode = ntdb_brlock(ntdb, ltype, off, len, nb_flags); if (ecode != NTDB_ERR_LOCK) { return ecode; } /* Try locking first half, then second. */ ecode = ntdb_lock_gradual(ntdb, ltype, flags, off, len / 2); if (ecode != NTDB_SUCCESS) return ecode; ecode = ntdb_lock_gradual(ntdb, ltype, flags, off + len / 2, len - len / 2); if (ecode != NTDB_SUCCESS) { ntdb_brunlock(ntdb, ltype, off, len / 2); } return ecode; } /* lock/unlock entire database. It can only be upgradable if you have some * other way of guaranteeing exclusivity (ie. transaction write lock). 
*/ enum NTDB_ERROR ntdb_allrecord_lock(struct ntdb_context *ntdb, int ltype, enum ntdb_lock_flags flags, bool upgradable) { enum NTDB_ERROR ecode; ntdb_bool_err berr; if (ntdb->flags & NTDB_NOLOCK) { return NTDB_SUCCESS; } if (!check_lock_pid(ntdb, "ntdb_allrecord_lock", true)) { return NTDB_ERR_LOCK; } if (ntdb->file->allrecord_lock.count) { if (ntdb->file->allrecord_lock.owner != ntdb) { return owner_conflict(ntdb, "ntdb_allrecord_lock"); } if (ltype == F_RDLCK || ntdb->file->allrecord_lock.ltype == F_WRLCK) { ntdb->file->allrecord_lock.count++; return NTDB_SUCCESS; } /* a global lock of a different type exists */ return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_USE_ERROR, "ntdb_allrecord_lock: already have %s lock", ntdb->file->allrecord_lock.ltype == F_RDLCK ? "read" : "write"); } if (ntdb_has_hash_locks(ntdb)) { /* can't combine global and chain locks */ return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_USE_ERROR, "ntdb_allrecord_lock:" " already have chain lock"); } if (upgradable && ltype != F_RDLCK) { /* ntdb error: you can't upgrade a write lock! */ return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_allrecord_lock:" " can't upgrade a write lock"); } ntdb->stats.locks++; again: /* Lock hashes, gradually. */ ecode = ntdb_lock_gradual(ntdb, ltype, flags, NTDB_HASH_LOCK_START, 1 << ntdb->hash_bits); if (ecode != NTDB_SUCCESS) return ecode; /* Lock free tables: there to end of file. */ ecode = ntdb_brlock(ntdb, ltype, NTDB_HASH_LOCK_START + (1 << ntdb->hash_bits), 0, flags); if (ecode != NTDB_SUCCESS) { ntdb_brunlock(ntdb, ltype, NTDB_HASH_LOCK_START, 1 << ntdb->hash_bits); return ecode; } ntdb->file->allrecord_lock.owner = ntdb; ntdb->file->allrecord_lock.count = 1; /* If it's upgradable, it's actually exclusive so we can treat * it as a write lock. */ ntdb->file->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype; ntdb->file->allrecord_lock.off = upgradable; /* Now check for needing recovery. 
*/ if (flags & NTDB_LOCK_NOCHECK) return NTDB_SUCCESS; berr = ntdb_needs_recovery(ntdb); if (likely(berr == false)) return NTDB_SUCCESS; ntdb_allrecord_unlock(ntdb, ltype); if (berr < 0) return NTDB_OFF_TO_ERR(berr); ecode = ntdb_lock_and_recover(ntdb); if (ecode != NTDB_SUCCESS) { return ecode; } goto again; } enum NTDB_ERROR ntdb_lock_open(struct ntdb_context *ntdb, int ltype, enum ntdb_lock_flags flags) { return ntdb_nest_lock(ntdb, NTDB_OPEN_LOCK, ltype, flags); } void ntdb_unlock_open(struct ntdb_context *ntdb, int ltype) { ntdb_nest_unlock(ntdb, NTDB_OPEN_LOCK, ltype); } bool ntdb_has_open_lock(struct ntdb_context *ntdb) { return !(ntdb->flags & NTDB_NOLOCK) && find_nestlock(ntdb, NTDB_OPEN_LOCK, ntdb) != NULL; } enum NTDB_ERROR ntdb_lock_expand(struct ntdb_context *ntdb, int ltype) { /* Lock doesn't protect data, so don't check (we recurse if we do!) */ return ntdb_nest_lock(ntdb, NTDB_EXPANSION_LOCK, ltype, NTDB_LOCK_WAIT | NTDB_LOCK_NOCHECK); } void ntdb_unlock_expand(struct ntdb_context *ntdb, int ltype) { ntdb_nest_unlock(ntdb, NTDB_EXPANSION_LOCK, ltype); } /* unlock entire db */ void ntdb_allrecord_unlock(struct ntdb_context *ntdb, int ltype) { if (ntdb->flags & NTDB_NOLOCK) return; if (ntdb->file->allrecord_lock.count == 0) { ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_USE_ERROR, "ntdb_allrecord_unlock: not locked!"); return; } if (ntdb->file->allrecord_lock.owner != ntdb) { ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_USE_ERROR, "ntdb_allrecord_unlock: not locked by us!"); return; } /* Upgradable locks are marked as write locks. */ if (ntdb->file->allrecord_lock.ltype != ltype && (!ntdb->file->allrecord_lock.off || ltype != F_RDLCK)) { ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_allrecord_unlock: have %s lock", ntdb->file->allrecord_lock.ltype == F_RDLCK ? 
"read" : "write"); return; } if (ntdb->file->allrecord_lock.count > 1) { ntdb->file->allrecord_lock.count--; return; } ntdb->file->allrecord_lock.count = 0; ntdb->file->allrecord_lock.ltype = 0; ntdb_brunlock(ntdb, ltype, NTDB_HASH_LOCK_START, 0); } bool ntdb_has_expansion_lock(struct ntdb_context *ntdb) { return find_nestlock(ntdb, NTDB_EXPANSION_LOCK, ntdb) != NULL; } bool ntdb_has_hash_locks(struct ntdb_context *ntdb) { unsigned int i; for (i=0; ifile->num_lockrecs; i++) { if (ntdb->file->lockrecs[i].off >= NTDB_HASH_LOCK_START && ntdb->file->lockrecs[i].off < (NTDB_HASH_LOCK_START + (1 << ntdb->hash_bits))) return true; } return false; } static bool ntdb_has_free_lock(struct ntdb_context *ntdb) { unsigned int i; if (ntdb->flags & NTDB_NOLOCK) return false; for (i=0; ifile->num_lockrecs; i++) { if (ntdb->file->lockrecs[i].off > NTDB_HASH_LOCK_START + (1 << ntdb->hash_bits)) return true; } return false; } enum NTDB_ERROR ntdb_lock_hash(struct ntdb_context *ntdb, unsigned int h, int ltype) { unsigned l = NTDB_HASH_LOCK_START + h; assert(h < (1 << ntdb->hash_bits)); /* a allrecord lock allows us to avoid per chain locks */ if (ntdb->file->allrecord_lock.count) { if (!check_lock_pid(ntdb, "ntdb_lock_hashes", true)) return NTDB_ERR_LOCK; if (ntdb->file->allrecord_lock.owner != ntdb) return owner_conflict(ntdb, "ntdb_lock_hashes"); if (ltype == ntdb->file->allrecord_lock.ltype || ltype == F_RDLCK) { return NTDB_SUCCESS; } return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_USE_ERROR, "ntdb_lock_hashes:" " already have %s allrecordlock", ntdb->file->allrecord_lock.ltype == F_RDLCK ? 
"read" : "write"); } if (ntdb_has_free_lock(ntdb)) { return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_lock_hashes: already have free lock"); } if (ntdb_has_expansion_lock(ntdb)) { return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_lock_hashes:" " already have expansion lock"); } return ntdb_nest_lock(ntdb, l, ltype, NTDB_LOCK_WAIT); } enum NTDB_ERROR ntdb_unlock_hash(struct ntdb_context *ntdb, unsigned int h, int ltype) { unsigned l = NTDB_HASH_LOCK_START + (h & ((1 << ntdb->hash_bits)-1)); if (ntdb->flags & NTDB_NOLOCK) return 0; /* a allrecord lock allows us to avoid per chain locks */ if (ntdb->file->allrecord_lock.count) { if (ntdb->file->allrecord_lock.ltype == F_RDLCK && ltype == F_WRLCK) { return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_unlock_hashes RO allrecord!"); } if (ntdb->file->allrecord_lock.owner != ntdb) { return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_USE_ERROR, "ntdb_unlock_hashes:" " not locked by us!"); } return NTDB_SUCCESS; } return ntdb_nest_unlock(ntdb, l, ltype); } /* Hash locks use NTDB_HASH_LOCK_START + .. * Then we begin; bucket offsets are sizeof(ntdb_len_t) apart, so we divide. * The result is that on 32 bit systems we don't use lock values > 2^31 on * files that are less than 4GB. 
*/ static ntdb_off_t free_lock_off(const struct ntdb_context *ntdb, ntdb_off_t b_off) { return NTDB_HASH_LOCK_START + (1 << ntdb->hash_bits) + b_off / sizeof(ntdb_off_t); } enum NTDB_ERROR ntdb_lock_free_bucket(struct ntdb_context *ntdb, ntdb_off_t b_off, enum ntdb_lock_flags waitflag) { assert(b_off >= sizeof(struct ntdb_header)); if (ntdb->flags & NTDB_NOLOCK) return 0; /* a allrecord lock allows us to avoid per chain locks */ if (ntdb->file->allrecord_lock.count) { if (!check_lock_pid(ntdb, "ntdb_lock_free_bucket", true)) return NTDB_ERR_LOCK; if (ntdb->file->allrecord_lock.owner != ntdb) { return owner_conflict(ntdb, "ntdb_lock_free_bucket"); } if (ntdb->file->allrecord_lock.ltype == F_WRLCK) return 0; return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_lock_free_bucket with" " read-only allrecordlock!"); } #if 0 /* FIXME */ if (ntdb_has_expansion_lock(ntdb)) { return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR, "ntdb_lock_free_bucket:" " already have expansion lock"); } #endif return ntdb_nest_lock(ntdb, free_lock_off(ntdb, b_off), F_WRLCK, waitflag); } void ntdb_unlock_free_bucket(struct ntdb_context *ntdb, ntdb_off_t b_off) { if (ntdb->file->allrecord_lock.count) return; ntdb_nest_unlock(ntdb, free_lock_off(ntdb, b_off), F_WRLCK); } _PUBLIC_ enum NTDB_ERROR ntdb_lockall(struct ntdb_context *ntdb) { return ntdb_allrecord_lock(ntdb, F_WRLCK, NTDB_LOCK_WAIT, false); } _PUBLIC_ void ntdb_unlockall(struct ntdb_context *ntdb) { ntdb_allrecord_unlock(ntdb, F_WRLCK); } _PUBLIC_ enum NTDB_ERROR ntdb_lockall_read(struct ntdb_context *ntdb) { return ntdb_allrecord_lock(ntdb, F_RDLCK, NTDB_LOCK_WAIT, false); } _PUBLIC_ void ntdb_unlockall_read(struct ntdb_context *ntdb) { ntdb_allrecord_unlock(ntdb, F_RDLCK); } void ntdb_lock_cleanup(struct ntdb_context *ntdb) { unsigned int i; /* We don't want to warn: they're allowed to close ntdb after fork. 
*/ if (!check_lock_pid(ntdb, "ntdb_close", false)) return; while (ntdb->file->allrecord_lock.count && ntdb->file->allrecord_lock.owner == ntdb) { ntdb_allrecord_unlock(ntdb, ntdb->file->allrecord_lock.ltype); } for (i=0; ifile->num_lockrecs; i++) { if (ntdb->file->lockrecs[i].owner == ntdb) { ntdb_nest_unlock(ntdb, ntdb->file->lockrecs[i].off, ntdb->file->lockrecs[i].ltype); i--; } } } ntdb-1.0/man/000077500000000000000000000000001224151530700130545ustar00rootroot00000000000000ntdb-1.0/man/ntdb.3.xml000066400000000000000000000106621224151530700146730ustar00rootroot00000000000000 ntdb 3 Samba System Administration tools 4.1 ntdb A not-so trivial keyword/data database system #include <ntdb.h> DESCRIPTION If you have previously used the tdb library from Samba, much of this will seem familiar, but there are some API changes which a compiler will warn you about if you simply replace 'tdb' with 'ntdb' in your code! The on-disk format for ntdb is incompatible with tdb. tdb's API was based on gdbm, and ntdb continues this tradition, with enhancements. A differences guide is available in the text file lib/ntdb/doc/TDB_porting.txt in the SAMBA source tree. NTDB API OVERVIEW The complete API is documented in the ntdb.h header, which is kept up-to-date and recommended reading. Normal usage is to call ntdb_open() to create or open an ntdb file. ntdb_store() is used to add records, ntdb_fetch() is used to fetch them. Traversals are supported via callback (ntdb_traverse()) or iteration (ntdb_firstkey() and ntdb_nextkey()). Transactions are supported for batching updates or reads atomically, using ntdb_transaction_start() and ntdb_transaction_commit(). 
Use With Talloc ntdb_open() takes an optional linked list of attributes: in particular you can specify an alternate allocator (such as talloc): #include <talloc.h> #include <ntdb.h> static void *my_alloc(const void *owner, size_t len, void *priv) { return talloc_size(owner, len); } static void *my_expand(void *old, size_t newlen, void *priv) { return talloc_realloc_size(NULL, old, newlen); } static void my_free(void *old, void *priv) { talloc_free(old); } /* This opens an ntdb file as a talloc object with given parent. */ struct ntdb_context *ntdb_open_talloc(const void *parent, const char *filename) { struct ntdb_context *ntdb; union ntdb_attribute alloc; alloc.base.attr = NTDB_ATTRIBUTE_ALLOCATOR; alloc.base.next = NULL; alloc.alloc.alloc = my_alloc; alloc.alloc.expand = my_expand; alloc.alloc.free = my_free; ntdb = ntdb_open(filename, NTDB_DEFAULT, O_RDWR|O_CREAT, 0600, &alloc); if (ntdb) { talloc_steal(parent, ntdb); talloc_set_name(ntdb, "%s", filename); } return ntdb; } SEE ALSO AUTHOR The original tdb software was created by Andrew Tridgell, and is now developed by the Samba Team as an Open Source project similar to the way the Linux kernel is developed. ntdb was derived from tdb, but mostly rewritten by Rusty Russell. COPYRIGHT/LICENSE Copyright (C) Rusty Russell 2013, IBM Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, see http://www.gnu.org/licenses/. 
ntdb-1.0/man/ntdbbackup.8.xml000066400000000000000000000100271224151530700160610ustar00rootroot00000000000000 ntdbbackup 8 Samba System Administration tools 4.1 ntdbbackup tool for backing up and for validating the integrity of samba .ntdb files ntdbbackup -s suffix -v -h DESCRIPTION This tool is part of the samba 1 suite. ntdbbackup is a tool that may be used to backup samba .ntdb files. This tool may also be used to verify the integrity of the .ntdb files prior to samba startup or during normal operation. If it finds file damage and it finds a prior backup the backup file will be restored. OPTIONS -h Get help information. -s suffix The -s option allows the administrator to specify a file backup extension. This way it is possible to keep a history of ntdb backup files by using a new suffix for each backup. -v The -v will check the database for damages (corrupt data) which if detected causes the backup to be restored. COMMANDS GENERAL INFORMATION The ntdbbackup utility can safely be run at any time. It was designed so that it can be used at any time to validate the integrity of ntdb files, even during Samba operation. Typical usage for the command will be: ntdbbackup [-s suffix] *.ntdb Before restarting samba the following command may be run to validate .ntdb files: ntdbbackup -v [-s suffix] *.ntdb Note that Samba 4 can use .tdb files instead, so you should use tdbbackup on those files. Samba .tdb and .ntdb files are stored in various locations, be sure to run backup all .(n)tdb files on the system. Important files includes: secrets.(n)tdb - usual location is in the /usr/local/samba/private directory, or on some systems in /etc/samba. passdb.(n)tdb - usual location is in the /usr/local/samba/private directory, or on some systems in /etc/samba. *.tdb and *.ntdb located in the /usr/local/samba/var directory or on some systems in the /var/cache or /var/lib/samba directories. VERSION This man page is correct for version 4 of the Samba suite. 
SEE ALSO tdbbackup(8), ntdbrestore(8) AUTHOR The original Samba software and related utilities were created by Andrew Tridgell. Samba is now developed by the Samba Team as an Open Source project similar to the way the Linux kernel is developed. The ntdbbackup man page was written by Rusty Russell, based on the tdbbackup man page by John H Terpstra. ntdb-1.0/man/ntdbdump.8.xml000066400000000000000000000045361224151530700155710ustar00rootroot00000000000000 ntdbdump 8 Samba System Administration tools 4.1 ntdbdump tool for printing the contents of an NTDB file ntdbdump -k keyname -e -h filename DESCRIPTION This tool is part of the samba 1 suite. ntdbdump is a very simple utility that 'dumps' the contents of a NTDB (New Trivial DataBase) file to standard output in a human-readable format. This tool can be used when debugging problems with NTDB files. It is intended for those who are somewhat familiar with Samba internals. OPTIONS -h Get help information. -k keyname The -k option restricts dumping to a single key, if found. SEE ALSO tdbdump(8), ntdbtool(8) VERSION This man page is correct for version 4 of the Samba suite. AUTHOR The original Samba software and related utilities were created by Andrew Tridgell. Samba is now developed by the Samba Team as an Open Source project similar to the way the Linux kernel is developed. The ntdbdump man page was written by Rusty Russell, base on the tdbdump man page by Jelmer Vernooij. ntdb-1.0/man/ntdbrestore.8.xml000066400000000000000000000042131224151530700162770ustar00rootroot00000000000000 ntdbrestore 8 Samba System Administration tools 4.1 ntdbrestore tool for creating a NTDB file out of a ntdbdump output ntdbrestore ntdbfilename DESCRIPTION This tool is part of the samba 1 suite. ntdbrestore is a very simple utility that 'restores' the contents of dump file into NTDB (New Trivial DataBase) file. The dump file is obtained from the ntdbdump or tdbdump commands. 
This tool wait on the standard input for the content of the dump and will write the ntdb in the ntdbfilename parameter. This tool can be used to translate between ntdb and tdb files by dumping and restoring. VERSION This man page is correct for version 4 of the Samba suite. SEE ALSO ntdbdump(8), tdbrestore(8) AUTHOR The original Samba software and related utilities were created by Andrew Tridgell. Samba is now developed by the Samba Team as an Open Source project similar to the way the Linux kernel is developed. ntdbrestore was written by Rusty Russell based on tdbrestore, which was initially written by Volker Lendecke based on an idea by Simon McVittie. The ntdbrestore man page was written by Rusty Russell, based on the tdbrestore man page by Matthieu Patou. ntdb-1.0/man/ntdbtool.8.xml000066400000000000000000000133671224151530700156030ustar00rootroot00000000000000 ntdbtool 8 Samba System Administration tools 4.1 ntdbtool manipulate the contents NTDB files ntdbtool ntdbtool NTDBFILE COMMANDS DESCRIPTION This tool is part of the samba 1 suite. ntdbtool a tool for displaying and altering the contents of Samba NTDB (New Trivial DataBase) files. Each of the commands listed below can be entered interactively or provided on the command line. COMMANDS NTDBFILE Create a new database named NTDBFILE. NTDBFILE Open an existing database named NTDBFILE. Erase the current database. Dump the current database as strings. Dump the current database as connection records. Dump the current database keys as strings. Dump the current database keys as hex values. Print summary information about the current database. KEY DATA Insert a record into the current database. KEY NTDBFILE Move a record from the current database into NTDBFILE. KEY DATA Store (replace) a record in the current database. KEY Show a record by key. KEY Delete a record by key. Print the current database hash table and free list. Print the current database and free list. COMMAND Execute the given system command. 
Print the first record in the current database. Print the next record in the current database. Check the integrity of the current database. Repack a database using a temporary file to remove fragmentation. Exit ntdbtool. SEE ALSO tdbtool(8) CAVEATS The contents of the Samba NTDB files are private to the implementation and should not be altered with ntdbtool. VERSION This man page is correct for version 4.0 of the Samba suite. AUTHOR The original Samba software and related utilities were created by Andrew Tridgell. Samba is now developed by the Samba Team as an Open Source project similar to the way the Linux kernel is developed. ntdb-1.0/ntdb.c000066400000000000000000000354011224151530700133770ustar00rootroot00000000000000 /* Trivial Database 2: fetch, store and misc routines. Copyright (C) Rusty Russell 2010 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include "private.h" #ifndef HAVE_LIBREPLACE #include #include #endif static enum NTDB_ERROR update_rec_hdr(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_len_t keylen, ntdb_len_t datalen, struct ntdb_used_record *rec) { uint64_t dataroom = rec_data_length(rec) + rec_extra_padding(rec); enum NTDB_ERROR ecode; ecode = set_header(ntdb, rec, NTDB_USED_MAGIC, keylen, datalen, keylen + dataroom); if (ecode == NTDB_SUCCESS) { ecode = ntdb_write_convert(ntdb, off, rec, sizeof(*rec)); } return ecode; } static enum NTDB_ERROR replace_data(struct ntdb_context *ntdb, struct hash_info *h, NTDB_DATA key, NTDB_DATA dbuf, ntdb_off_t old_off, ntdb_len_t old_room, bool growing) { ntdb_off_t new_off; enum NTDB_ERROR ecode; /* Allocate a new record. */ new_off = alloc(ntdb, key.dsize, dbuf.dsize, NTDB_USED_MAGIC, growing); if (NTDB_OFF_IS_ERR(new_off)) { return NTDB_OFF_TO_ERR(new_off); } /* We didn't like the existing one: remove it. */ if (old_off) { ntdb->stats.frees++; ecode = add_free_record(ntdb, old_off, sizeof(struct ntdb_used_record) + key.dsize + old_room, NTDB_LOCK_WAIT, true); if (ecode == NTDB_SUCCESS) ecode = replace_in_hash(ntdb, h, new_off); } else { ecode = add_to_hash(ntdb, h, new_off); } if (ecode != NTDB_SUCCESS) { return ecode; } new_off += sizeof(struct ntdb_used_record); ecode = ntdb->io->twrite(ntdb, new_off, key.dptr, key.dsize); if (ecode != NTDB_SUCCESS) { return ecode; } new_off += key.dsize; ecode = ntdb->io->twrite(ntdb, new_off, dbuf.dptr, dbuf.dsize); if (ecode != NTDB_SUCCESS) { return ecode; } if (ntdb->flags & NTDB_SEQNUM) ntdb_inc_seqnum(ntdb); return NTDB_SUCCESS; } static enum NTDB_ERROR update_data(struct ntdb_context *ntdb, ntdb_off_t off, NTDB_DATA dbuf, ntdb_len_t extra) { enum NTDB_ERROR ecode; ecode = ntdb->io->twrite(ntdb, off, dbuf.dptr, dbuf.dsize); if (ecode == NTDB_SUCCESS && extra) { /* Put a zero in; future versions may append other data. 
*/ ecode = ntdb->io->twrite(ntdb, off + dbuf.dsize, "", 1); } if (ntdb->flags & NTDB_SEQNUM) ntdb_inc_seqnum(ntdb); return ecode; } _PUBLIC_ enum NTDB_ERROR ntdb_store(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA dbuf, int flag) { struct hash_info h; ntdb_off_t off; ntdb_len_t old_room = 0; struct ntdb_used_record rec; enum NTDB_ERROR ecode; off = find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL); if (NTDB_OFF_IS_ERR(off)) { return NTDB_OFF_TO_ERR(off); } /* Now we have lock on this hash bucket. */ if (flag == NTDB_INSERT) { if (off) { ecode = NTDB_ERR_EXISTS; goto out; } } else { if (off) { old_room = rec_data_length(&rec) + rec_extra_padding(&rec); if (old_room >= dbuf.dsize) { /* Can modify in-place. Easy! */ ecode = update_rec_hdr(ntdb, off, key.dsize, dbuf.dsize, &rec); if (ecode != NTDB_SUCCESS) { goto out; } ecode = update_data(ntdb, off + sizeof(rec) + key.dsize, dbuf, old_room - dbuf.dsize); if (ecode != NTDB_SUCCESS) { goto out; } ntdb_unlock_hash(ntdb, h.h, F_WRLCK); return NTDB_SUCCESS; } } else { if (flag == NTDB_MODIFY) { /* if the record doesn't exist and we are in NTDB_MODIFY mode then we should fail the store */ ecode = NTDB_ERR_NOEXIST; goto out; } } } /* If we didn't use the old record, this implies we're growing. */ ecode = replace_data(ntdb, &h, key, dbuf, off, old_room, off); out: ntdb_unlock_hash(ntdb, h.h, F_WRLCK); return ecode; } _PUBLIC_ enum NTDB_ERROR ntdb_append(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA dbuf) { struct hash_info h; ntdb_off_t off; struct ntdb_used_record rec; ntdb_len_t old_room = 0, old_dlen; unsigned char *newdata; NTDB_DATA new_dbuf; enum NTDB_ERROR ecode; off = find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL); if (NTDB_OFF_IS_ERR(off)) { return NTDB_OFF_TO_ERR(off); } if (off) { old_dlen = rec_data_length(&rec); old_room = old_dlen + rec_extra_padding(&rec); /* Fast path: can append in place. 
*/ if (rec_extra_padding(&rec) >= dbuf.dsize) { ecode = update_rec_hdr(ntdb, off, key.dsize, old_dlen + dbuf.dsize, &rec); if (ecode != NTDB_SUCCESS) { goto out; } off += sizeof(rec) + key.dsize + old_dlen; ecode = update_data(ntdb, off, dbuf, rec_extra_padding(&rec)); goto out; } /* Slow path. */ newdata = ntdb->alloc_fn(ntdb, key.dsize + old_dlen + dbuf.dsize, ntdb->alloc_data); if (!newdata) { ecode = ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "ntdb_append:" " failed to allocate %zu bytes", (size_t)(key.dsize + old_dlen + dbuf.dsize)); goto out; } ecode = ntdb->io->tread(ntdb, off + sizeof(rec) + key.dsize, newdata, old_dlen); if (ecode != NTDB_SUCCESS) { goto out_free_newdata; } memcpy(newdata + old_dlen, dbuf.dptr, dbuf.dsize); new_dbuf.dptr = newdata; new_dbuf.dsize = old_dlen + dbuf.dsize; } else { newdata = NULL; new_dbuf = dbuf; } /* If they're using ntdb_append(), it implies they're growing record. */ ecode = replace_data(ntdb, &h, key, new_dbuf, off, old_room, true); out_free_newdata: ntdb->free_fn(newdata, ntdb->alloc_data); out: ntdb_unlock_hash(ntdb, h.h, F_WRLCK); return ecode; } _PUBLIC_ enum NTDB_ERROR ntdb_fetch(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA *data) { ntdb_off_t off; struct ntdb_used_record rec; struct hash_info h; enum NTDB_ERROR ecode; const char *keyp; off = find_and_lock(ntdb, key, F_RDLCK, &h, &rec, &keyp); if (NTDB_OFF_IS_ERR(off)) { return NTDB_OFF_TO_ERR(off); } if (!off) { ecode = NTDB_ERR_NOEXIST; } else { data->dsize = rec_data_length(&rec); data->dptr = ntdb->alloc_fn(ntdb, data->dsize, ntdb->alloc_data); if (unlikely(!data->dptr)) { ecode = NTDB_ERR_OOM; } else { memcpy(data->dptr, keyp + key.dsize, data->dsize); ecode = NTDB_SUCCESS; } ntdb_access_release(ntdb, keyp); } ntdb_unlock_hash(ntdb, h.h, F_RDLCK); return ecode; } _PUBLIC_ bool ntdb_exists(struct ntdb_context *ntdb, NTDB_DATA key) { ntdb_off_t off; struct ntdb_used_record rec; struct hash_info h; off = find_and_lock(ntdb, key, F_RDLCK, &h, &rec, 
NULL); if (NTDB_OFF_IS_ERR(off)) { return false; } ntdb_unlock_hash(ntdb, h.h, F_RDLCK); return off ? true : false; } _PUBLIC_ enum NTDB_ERROR ntdb_delete(struct ntdb_context *ntdb, NTDB_DATA key) { ntdb_off_t off; struct ntdb_used_record rec; struct hash_info h; enum NTDB_ERROR ecode; off = find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL); if (NTDB_OFF_IS_ERR(off)) { return NTDB_OFF_TO_ERR(off); } if (!off) { ecode = NTDB_ERR_NOEXIST; goto unlock; } ecode = delete_from_hash(ntdb, &h); if (ecode != NTDB_SUCCESS) { goto unlock; } /* Free the deleted entry. */ ntdb->stats.frees++; ecode = add_free_record(ntdb, off, sizeof(struct ntdb_used_record) + rec_key_length(&rec) + rec_data_length(&rec) + rec_extra_padding(&rec), NTDB_LOCK_WAIT, true); if (ntdb->flags & NTDB_SEQNUM) ntdb_inc_seqnum(ntdb); unlock: ntdb_unlock_hash(ntdb, h.h, F_WRLCK); return ecode; } _PUBLIC_ unsigned int ntdb_get_flags(struct ntdb_context *ntdb) { return ntdb->flags; } static bool inside_transaction(const struct ntdb_context *ntdb) { return ntdb->transaction != NULL; } static bool readonly_changable(struct ntdb_context *ntdb, const char *caller) { if (inside_transaction(ntdb)) { ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "%s: can't change" " NTDB_RDONLY inside transaction", caller); return false; } return true; } _PUBLIC_ void ntdb_add_flag(struct ntdb_context *ntdb, unsigned flag) { if (ntdb->flags & NTDB_INTERNAL) { ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_add_flag: internal db"); return; } switch (flag) { case NTDB_NOLOCK: ntdb->flags |= NTDB_NOLOCK; break; case NTDB_NOMMAP: if (ntdb->file->direct_count) { ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_add_flag: Can't get NTDB_NOMMAP from" " ntdb_parse_record!"); return; } ntdb->flags |= NTDB_NOMMAP; #ifndef HAVE_INCOHERENT_MMAP ntdb_munmap(ntdb); #endif break; case NTDB_NOSYNC: ntdb->flags |= NTDB_NOSYNC; break; case NTDB_SEQNUM: ntdb->flags |= NTDB_SEQNUM; break; case NTDB_ALLOW_NESTING: 
ntdb->flags |= NTDB_ALLOW_NESTING; break; case NTDB_RDONLY: if (readonly_changable(ntdb, "ntdb_add_flag")) ntdb->flags |= NTDB_RDONLY; break; default: ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_add_flag: Unknown flag %u", flag); } } _PUBLIC_ void ntdb_remove_flag(struct ntdb_context *ntdb, unsigned flag) { if (ntdb->flags & NTDB_INTERNAL) { ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_remove_flag: internal db"); return; } switch (flag) { case NTDB_NOLOCK: ntdb->flags &= ~NTDB_NOLOCK; break; case NTDB_NOMMAP: ntdb->flags &= ~NTDB_NOMMAP; #ifndef HAVE_INCOHERENT_MMAP /* If mmap incoherent, we were mmaping anyway. */ ntdb_mmap(ntdb); #endif break; case NTDB_NOSYNC: ntdb->flags &= ~NTDB_NOSYNC; break; case NTDB_SEQNUM: ntdb->flags &= ~NTDB_SEQNUM; break; case NTDB_ALLOW_NESTING: ntdb->flags &= ~NTDB_ALLOW_NESTING; break; case NTDB_RDONLY: if ((ntdb->open_flags & O_ACCMODE) == O_RDONLY) { ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_remove_flag: can't" " remove NTDB_RDONLY on ntdb" " opened with O_RDONLY"); break; } if (readonly_changable(ntdb, "ntdb_remove_flag")) ntdb->flags &= ~NTDB_RDONLY; break; default: ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_remove_flag: Unknown flag %u", flag); } } _PUBLIC_ const char *ntdb_errorstr(enum NTDB_ERROR ecode) { /* Gcc warns if you miss a case in the switch, so use that. 
*/ switch (NTDB_ERR_TO_OFF(ecode)) { case NTDB_ERR_TO_OFF(NTDB_SUCCESS): return "Success"; case NTDB_ERR_TO_OFF(NTDB_ERR_CORRUPT): return "Corrupt database"; case NTDB_ERR_TO_OFF(NTDB_ERR_IO): return "IO Error"; case NTDB_ERR_TO_OFF(NTDB_ERR_LOCK): return "Locking error"; case NTDB_ERR_TO_OFF(NTDB_ERR_OOM): return "Out of memory"; case NTDB_ERR_TO_OFF(NTDB_ERR_EXISTS): return "Record exists"; case NTDB_ERR_TO_OFF(NTDB_ERR_EINVAL): return "Invalid parameter"; case NTDB_ERR_TO_OFF(NTDB_ERR_NOEXIST): return "Record does not exist"; case NTDB_ERR_TO_OFF(NTDB_ERR_RDONLY): return "write not permitted"; } return "Invalid error code"; } enum NTDB_ERROR COLD ntdb_logerr(struct ntdb_context *ntdb, enum NTDB_ERROR ecode, enum ntdb_log_level level, const char *fmt, ...) { char *message; va_list ap; size_t len; /* ntdb_open paths care about errno, so save it. */ int saved_errno = errno; if (!ntdb->log_fn) return ecode; va_start(ap, fmt); len = vsnprintf(NULL, 0, fmt, ap); va_end(ap); message = ntdb->alloc_fn(ntdb, len + 1, ntdb->alloc_data); if (!message) { ntdb->log_fn(ntdb, NTDB_LOG_ERROR, NTDB_ERR_OOM, "out of memory formatting message:", ntdb->log_data); ntdb->log_fn(ntdb, level, ecode, fmt, ntdb->log_data); } else { va_start(ap, fmt); vsnprintf(message, len+1, fmt, ap); va_end(ap); ntdb->log_fn(ntdb, level, ecode, message, ntdb->log_data); ntdb->free_fn(message, ntdb->alloc_data); } errno = saved_errno; return ecode; } _PUBLIC_ enum NTDB_ERROR ntdb_parse_record_(struct ntdb_context *ntdb, NTDB_DATA key, enum NTDB_ERROR (*parse)(NTDB_DATA k, NTDB_DATA d, void *data), void *data) { ntdb_off_t off; struct ntdb_used_record rec; struct hash_info h; enum NTDB_ERROR ecode; const char *keyp; off = find_and_lock(ntdb, key, F_RDLCK, &h, &rec, &keyp); if (NTDB_OFF_IS_ERR(off)) { return NTDB_OFF_TO_ERR(off); } if (!off) { ecode = NTDB_ERR_NOEXIST; } else { unsigned int old_flags; NTDB_DATA d = ntdb_mkdata(keyp + key.dsize, rec_data_length(&rec)); /* * Make sure they don't try to write 
db, since they * have read lock! They can if they've done * ntdb_lockall(): if it was ntdb_lockall_read, that'll * stop them doing a write operation anyway. */ old_flags = ntdb->flags; if (!ntdb->file->allrecord_lock.count && !(ntdb->flags & NTDB_NOLOCK)) { ntdb->flags |= NTDB_RDONLY; } ecode = parse(key, d, data); ntdb->flags = old_flags; ntdb_access_release(ntdb, keyp); } ntdb_unlock_hash(ntdb, h.h, F_RDLCK); return ecode; } _PUBLIC_ const char *ntdb_name(const struct ntdb_context *ntdb) { return ntdb->name; } _PUBLIC_ int64_t ntdb_get_seqnum(struct ntdb_context *ntdb) { return ntdb_read_off(ntdb, offsetof(struct ntdb_header, seqnum)); } _PUBLIC_ int ntdb_fd(const struct ntdb_context *ntdb) { return ntdb->file->fd; } struct traverse_state { enum NTDB_ERROR error; struct ntdb_context *dest_db; }; /* traverse function for repacking */ static int repack_traverse(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA data, struct traverse_state *state) { state->error = ntdb_store(state->dest_db, key, data, NTDB_INSERT); if (state->error != NTDB_SUCCESS) { return -1; } return 0; } _PUBLIC_ enum NTDB_ERROR ntdb_repack(struct ntdb_context *ntdb) { struct ntdb_context *tmp_db; struct traverse_state state; state.error = ntdb_transaction_start(ntdb); if (state.error != NTDB_SUCCESS) { return state.error; } tmp_db = ntdb_open("tmpdb", NTDB_INTERNAL, O_RDWR|O_CREAT, 0, NULL); if (tmp_db == NULL) { state.error = ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, __location__ " Failed to create tmp_db"); ntdb_transaction_cancel(ntdb); return state.error; } state.dest_db = tmp_db; if (ntdb_traverse(ntdb, repack_traverse, &state) < 0) { goto fail; } state.error = ntdb_wipe_all(ntdb); if (state.error != NTDB_SUCCESS) { goto fail; } state.dest_db = ntdb; if (ntdb_traverse(tmp_db, repack_traverse, &state) < 0) { goto fail; } ntdb_close(tmp_db); return ntdb_transaction_commit(ntdb); fail: ntdb_transaction_cancel(ntdb); ntdb_close(tmp_db); return state.error; } 
ntdb-1.0/ntdb.h000066400000000000000000000750351224151530700134130ustar00rootroot00000000000000#ifndef CCAN_NTDB_H #define CCAN_NTDB_H /* NTDB: trivial database library version 2 Copyright (C) Andrew Tridgell 1999-2004 Copyright (C) Rusty Russell 2010-2012 ** NOTE! The following LGPL license applies to the ntdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef __cplusplus extern "C" { #endif #ifdef HAVE_LIBREPLACE #include #else #if HAVE_FILE_OFFSET_BITS #define _FILE_OFFSET_BITS 64 #endif /* For mode_t */ #include /* For O_* flags. */ #include /* For sig_atomic_t. */ #include /* For uint64_t */ #include /* For bool */ #include /* For memcmp */ #include #endif #if HAVE_CCAN #include #include #include #else #ifndef typesafe_cb_preargs /* Failing to have CCAN just mean less typesafe protection, etc. */ #define typesafe_cb_preargs(rtype, atype, fn, arg, ...) 
\ ((rtype (*)(__VA_ARGS__, atype))(fn)) #endif #ifndef cast_const #if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T) #define cast_const(type, expr) ((type)((intptr_t)(expr))) #else #define cast_const(type, expr) ((type *)(expr)) #endif #endif #endif /* !HAVE_CCAN */ union ntdb_attribute; struct ntdb_context; /** * struct TDB_DATA - (n)tdb data blob * * To ease compatibility, we use 'struct TDB_DATA' from tdb.h, so if * you want to include both tdb.h and ntdb.h, you need to #include * tdb.h first. */ #ifndef __TDB_H__ struct TDB_DATA { unsigned char *dptr; size_t dsize; }; #endif typedef struct TDB_DATA NTDB_DATA; /** * ntdb_open - open a database file * @name: the file name (or database name if flags contains NTDB_INTERNAL) * @ntdb_flags: options for this database * @open_flags: flags argument for ntdb's open() call. * @mode: mode argument for ntdb's open() call. * @attributes: linked list of extra attributes for this ntdb. * * This call opens (and potentially creates) a database file. * Multiple processes can have the NTDB file open at once. * * On failure it will return NULL, and set errno: it may also call * any log attribute found in @attributes. * * See also: * union ntdb_attribute */ struct ntdb_context *ntdb_open(const char *name, int ntdb_flags, int open_flags, mode_t mode, union ntdb_attribute *attributes); /* flags for ntdb_open() */ #define NTDB_DEFAULT 0 /* just a readability place holder */ #define NTDB_INTERNAL 2 /* don't store on disk */ #define NTDB_NOLOCK 4 /* don't do any locking */ #define NTDB_NOMMAP 8 /* don't use mmap */ #define NTDB_CONVERT 16 /* convert endian */ #define NTDB_NOSYNC 64 /* don't use synchronous transactions */ #define NTDB_SEQNUM 128 /* maintain a sequence number */ #define NTDB_ALLOW_NESTING 256 /* fake nested transactions */ #define NTDB_RDONLY 512 /* implied by O_RDONLY */ #define NTDB_CANT_CHECK 2048 /* has a feature which we don't understand */ /** * ntdb_close - close and free a ntdb. 
* @ntdb: the ntdb context returned from ntdb_open() * * This always succeeds, in that @ntdb is unusable after this call. But if * some unexpected error occurred while closing, it will return non-zero * (the only clue as to cause will be via the log attribute). */ int ntdb_close(struct ntdb_context *ntdb); /** * enum NTDB_ERROR - error returns for NTDB * * See Also: * ntdb_errorstr() */ enum NTDB_ERROR { NTDB_SUCCESS = 0, /* No error. */ NTDB_ERR_CORRUPT = -1, /* We read the db, and it was bogus. */ NTDB_ERR_IO = -2, /* We couldn't read/write the db. */ NTDB_ERR_LOCK = -3, /* Locking failed. */ NTDB_ERR_OOM = -4, /* Out of Memory. */ NTDB_ERR_EXISTS = -5, /* The key already exists. */ NTDB_ERR_NOEXIST = -6, /* The key does not exist. */ NTDB_ERR_EINVAL = -7, /* You're using it wrong. */ NTDB_ERR_RDONLY = -8, /* The database is read-only. */ NTDB_ERR_LAST = NTDB_ERR_RDONLY }; /** * ntdb_store - store a key/value pair in a ntdb. * @ntdb: the ntdb context returned from ntdb_open() * @key: the key * @dbuf: the data to associate with the key. * @flag: NTDB_REPLACE, NTDB_INSERT or NTDB_MODIFY. * * This inserts (or overwrites) a key/value pair in the NTDB. If flag * is NTDB_REPLACE, it doesn't matter whether the key exists or not; * NTDB_INSERT means it must not exist (returns NTDB_ERR_EXISTS otherwise), * and NTDB_MODIFY means it must exist (returns NTDB_ERR_NOEXIST otherwise). * * On success, this returns NTDB_SUCCESS. * * See also: * ntdb_fetch, ntdb_transaction_start, ntdb_append, ntdb_delete. */ enum NTDB_ERROR ntdb_store(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA dbuf, int flag); /* flags to ntdb_store() */ #define NTDB_REPLACE 1 /* A readability place holder */ #define NTDB_INSERT 2 /* Don't overwrite an existing entry */ #define NTDB_MODIFY 3 /* Don't create an existing entry */ /** * ntdb_fetch - fetch a value from a ntdb. * @ntdb: the ntdb context returned from ntdb_open() * @key: the key * @data: pointer to data. 
* * This looks up a key in the database and sets it in @data. * * If it returns NTDB_SUCCESS, the key was found: it is your * responsibility to call free() on @data->dptr. * * Otherwise, it returns an error (usually, NTDB_ERR_NOEXIST) and @data is * undefined. */ enum NTDB_ERROR ntdb_fetch(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA *data); /** * ntdb_errorstr - map the ntdb error onto a constant readable string * @ecode: the enum NTDB_ERROR to map. * * This is useful for displaying errors to users. */ const char *ntdb_errorstr(enum NTDB_ERROR ecode); /** * ntdb_append - append a value to a key/value pair in a ntdb. * @ntdb: the ntdb context returned from ntdb_open() * @key: the key * @dbuf: the data to append. * * This is equivalent to fetching a record, reallocating .dptr to add the * data, and writing it back, only it's much more efficient. If the key * doesn't exist, it's equivalent to ntdb_store (with an additional hint that * you expect to expand the record in future). * * See Also: * ntdb_fetch(), ntdb_store() */ enum NTDB_ERROR ntdb_append(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA dbuf); /** * ntdb_delete - delete a key from a ntdb. * @ntdb: the ntdb context returned from ntdb_open() * @key: the key to delete. * * Returns NTDB_SUCCESS on success, or an error (usually NTDB_ERR_NOEXIST). * * See Also: * ntdb_fetch(), ntdb_store() */ enum NTDB_ERROR ntdb_delete(struct ntdb_context *ntdb, NTDB_DATA key); /** * ntdb_exists - does a key exist in the database? * @ntdb: the ntdb context returned from ntdb_open() * @key: the key to search for. * * Returns true if it exists, or false if it doesn't or any other error. */ bool ntdb_exists(struct ntdb_context *ntdb, NTDB_DATA key); /** * ntdb_deq - are NTDB_DATA equal? 
* @a: one NTDB_DATA * @b: another NTDB_DATA */ static inline bool ntdb_deq(NTDB_DATA a, NTDB_DATA b) { return a.dsize == b.dsize && memcmp(a.dptr, b.dptr, a.dsize) == 0; } /** * ntdb_mkdata - make a NTDB_DATA from const data * @p: the constant pointer * @len: the length * * As the dptr member of NTDB_DATA is not constant, you need to * cast it. This function keeps thost casts in one place, as well as * suppressing the warning some compilers give when casting away a * qualifier (eg. gcc with -Wcast-qual) */ static inline NTDB_DATA ntdb_mkdata(const void *p, size_t len) { NTDB_DATA d; d.dptr = cast_const(void *, p); d.dsize = len; return d; } /** * ntdb_transaction_start - start a transaction * @ntdb: the ntdb context returned from ntdb_open() * * This begins a series of atomic operations. Other processes will be able * to read the ntdb, but not alter it (they will block), nor will they see * any changes until ntdb_transaction_commit() is called. * * Note that if the NTDB_ALLOW_NESTING flag is set, a ntdb_transaction_start() * within a transaction will succeed, but it's not a real transaction: * (1) An inner transaction which is committed is not actually committed until * the outer transaction is; if the outer transaction is cancelled, the * inner ones are discarded. * (2) ntdb_transaction_cancel() marks the outer transaction as having an error, * so the final ntdb_transaction_commit() will fail. * (3) the outer transaction will see the results of the inner transaction. * * See Also: * ntdb_transaction_cancel, ntdb_transaction_commit. */ enum NTDB_ERROR ntdb_transaction_start(struct ntdb_context *ntdb); /** * ntdb_transaction_cancel - abandon a transaction * @ntdb: the ntdb context returned from ntdb_open() * * This aborts a transaction, discarding any changes which were made. * ntdb_close() does this implicitly. 
*/ void ntdb_transaction_cancel(struct ntdb_context *ntdb); /** * ntdb_transaction_commit - commit a transaction * @ntdb: the ntdb context returned from ntdb_open() * * This completes a transaction, writing any changes which were made. * * fsync() is used to commit the transaction (unless NTDB_NOSYNC is set), * making it robust against machine crashes, but very slow compared to * other NTDB operations. * * A failure can only be caused by unexpected errors (eg. I/O or * memory); this is no point looping on transaction failure. * * See Also: * ntdb_transaction_prepare_commit() */ enum NTDB_ERROR ntdb_transaction_commit(struct ntdb_context *ntdb); /** * ntdb_transaction_prepare_commit - prepare to commit a transaction * @ntdb: the ntdb context returned from ntdb_open() * * This ensures we have the resources to commit a transaction (using * ntdb_transaction_commit): if this succeeds then a transaction will only * fail if the write() or fsync() calls fail. * * If this fails you must still call ntdb_transaction_cancel() to cancel * the transaction. * * See Also: * ntdb_transaction_commit() */ enum NTDB_ERROR ntdb_transaction_prepare_commit(struct ntdb_context *ntdb); /** * ntdb_traverse - traverse a NTDB * @ntdb: the ntdb context returned from ntdb_open() * @fn: the function to call for every key/value pair (or NULL) * @p: the pointer to hand to @f * * This walks the NTDB until all they keys have been traversed, or @fn * returns non-zero. If the traverse function or other processes are * changing data or adding or deleting keys, the traverse may be * unreliable: keys may be skipped or (rarely) visited twice. * * There is one specific exception: the special case of deleting the * current key does not undermine the reliability of the traversal. * * On success, returns the number of keys iterated. On error returns * a negative enum NTDB_ERROR value. 
*/ #define ntdb_traverse(ntdb, fn, p) \ ntdb_traverse_(ntdb, typesafe_cb_preargs(int, void *, (fn), (p), \ struct ntdb_context *, \ NTDB_DATA, NTDB_DATA), (p)) int64_t ntdb_traverse_(struct ntdb_context *ntdb, int (*fn)(struct ntdb_context *, NTDB_DATA, NTDB_DATA, void *), void *p); /** * ntdb_parse_record - operate directly on data in the database. * @ntdb: the ntdb context returned from ntdb_open() * @key: the key whose record we should hand to @parse * @parse: the function to call for the data * @data: the private pointer to hand to @parse (types must match). * * This avoids a copy for many cases, by handing you a pointer into * the memory-mapped database. It also locks the record to prevent * other accesses at the same time, so it won't change. * * Within the @parse callback you can perform read operations on the * database, but no write operations: no ntdb_store() or * ntdb_delete(), for example. The exception is if you call * ntdb_lockall() before ntdb_parse_record(). * * Never alter the data handed to parse()! */ #define ntdb_parse_record(ntdb, key, parse, data) \ ntdb_parse_record_((ntdb), (key), \ typesafe_cb_preargs(enum NTDB_ERROR, void *, \ (parse), (data), \ NTDB_DATA, NTDB_DATA), (data)) enum NTDB_ERROR ntdb_parse_record_(struct ntdb_context *ntdb, NTDB_DATA key, enum NTDB_ERROR (*parse)(NTDB_DATA k, NTDB_DATA d, void *data), void *data); /** * ntdb_get_seqnum - get a database sequence number * @ntdb: the ntdb context returned from ntdb_open() * * This returns a sequence number: any change to the database from a * ntdb context opened with the NTDB_SEQNUM flag will cause that number * to increment. Note that the incrementing is unreliable (it is done * without locking), so this is only useful as an optimization. * * For example, you may have a regular database backup routine which * does not operate if the sequence number is unchanged. In the * unlikely event of a failed increment, it will be backed up next * time any way. 
* * Returns an enum NTDB_ERROR (ie. negative) on error. */ int64_t ntdb_get_seqnum(struct ntdb_context *ntdb); /** * ntdb_firstkey - get the "first" key in a NTDB * @ntdb: the ntdb context returned from ntdb_open() * @key: pointer to key. * * This returns an arbitrary key in the database; with ntdb_nextkey() it allows * open-coded traversal of the database, though it is slightly less efficient * than ntdb_traverse. * * It is your responsibility to free @key->dptr on success. * * Returns NTDB_ERR_NOEXIST if the database is empty. */ enum NTDB_ERROR ntdb_firstkey(struct ntdb_context *ntdb, NTDB_DATA *key); /** * ntdb_nextkey - get the "next" key in a NTDB * @ntdb: the ntdb context returned from ntdb_open() * @key: a key returned by ntdb_firstkey() or ntdb_nextkey(). * * This returns another key in the database; it will free @key.dptr for * your convenience. * * Returns NTDB_ERR_NOEXIST if there are no more keys. */ enum NTDB_ERROR ntdb_nextkey(struct ntdb_context *ntdb, NTDB_DATA *key); /** * ntdb_chainlock - lock a record in the NTDB * @ntdb: the ntdb context returned from ntdb_open() * @key: the key to lock. * * This prevents any access occurring to a group of keys including @key, * even if @key does not exist. This allows primitive atomic updates of * records without using transactions. * * You cannot begin a transaction while holding a ntdb_chainlock(), nor can * you do any operations on any other keys in the database. This also means * that you cannot hold more than one ntdb_chainlock() at a time. * * See Also: * ntdb_chainunlock() */ enum NTDB_ERROR ntdb_chainlock(struct ntdb_context *ntdb, NTDB_DATA key); /** * ntdb_chainunlock - unlock a record in the NTDB * @ntdb: the ntdb context returned from ntdb_open() * @key: the key to unlock. * * The key must have previously been locked by ntdb_chainlock(). 
*/ void ntdb_chainunlock(struct ntdb_context *ntdb, NTDB_DATA key); /** * ntdb_chainlock_read - lock a record in the NTDB, for reading * @ntdb: the ntdb context returned from ntdb_open() * @key: the key to lock. * * This prevents any changes from occurring to a group of keys including @key, * even if @key does not exist. This allows primitive atomic updates of * records without using transactions. * * You cannot begin a transaction while holding a ntdb_chainlock_read(), nor can * you do any operations on any other keys in the database. This also means * that you cannot hold more than one ntdb_chainlock()/read() at a time. * * See Also: * ntdb_chainlock() */ enum NTDB_ERROR ntdb_chainlock_read(struct ntdb_context *ntdb, NTDB_DATA key); /** * ntdb_chainunlock_read - unlock a record in the NTDB for reading * @ntdb: the ntdb context returned from ntdb_open() * @key: the key to unlock. * * The key must have previously been locked by ntdb_chainlock_read(). */ void ntdb_chainunlock_read(struct ntdb_context *ntdb, NTDB_DATA key); /** * ntdb_lockall - lock the entire NTDB * @ntdb: the ntdb context returned from ntdb_open() * * You cannot hold a ntdb_chainlock while calling this. It nests, so you * must call ntdb_unlockall as many times as you call ntdb_lockall. */ enum NTDB_ERROR ntdb_lockall(struct ntdb_context *ntdb); /** * ntdb_unlockall - unlock the entire NTDB * @ntdb: the ntdb context returned from ntdb_open() */ void ntdb_unlockall(struct ntdb_context *ntdb); /** * ntdb_lockall_read - lock the entire NTDB for reading * @ntdb: the ntdb context returned from ntdb_open() * * This prevents others writing to the database, eg. ntdb_delete, ntdb_store, * ntdb_append, but not ntdb_fetch. * * You cannot hold a ntdb_chainlock while calling this. It nests, so you * must call ntdb_unlockall_read as many times as you call ntdb_lockall_read. 
*/ enum NTDB_ERROR ntdb_lockall_read(struct ntdb_context *ntdb); /** * ntdb_unlockall_read - unlock the entire NTDB for reading * @ntdb: the ntdb context returned from ntdb_open() */ void ntdb_unlockall_read(struct ntdb_context *ntdb); /** * ntdb_wipe_all - wipe the database clean * @ntdb: the ntdb context returned from ntdb_open() * * Completely erase the database. This is faster than iterating through * each key and doing ntdb_delete. */ enum NTDB_ERROR ntdb_wipe_all(struct ntdb_context *ntdb); /** * ntdb_repack - repack the database * @ntdb: the ntdb context returned from ntdb_open() * * This repacks the database; if it is suffering from a great deal of * fragmentation this might help. However, it can take twice the * memory of the existing NTDB. */ enum NTDB_ERROR ntdb_repack(struct ntdb_context *ntdb); /** * ntdb_check - check a NTDB for consistency * @ntdb: the ntdb context returned from ntdb_open() * @check: function to check each key/data pair (or NULL) * @data: argument for @check, must match type. * * This performs a consistency check of the open database, optionally calling * a check() function on each record so you can do your own data consistency * checks as well. If check() returns an error, that is returned from * ntdb_check(). * * Note that the NTDB uses a feature which we don't understand which * indicates we can't run ntdb_check(), this will log a warning to that * effect and return NTDB_SUCCESS. You can detect this condition by * looking for NTDB_CANT_CHECK in ntdb_get_flags(). * * Returns NTDB_SUCCESS or an error. */ #define ntdb_check(ntdb, check, data) \ ntdb_check_((ntdb), typesafe_cb_preargs(enum NTDB_ERROR, void *, \ (check), (data), \ NTDB_DATA, \ NTDB_DATA), \ (data)) enum NTDB_ERROR ntdb_check_(struct ntdb_context *ntdb, enum NTDB_ERROR (*check)(NTDB_DATA k, NTDB_DATA d, void *data), void *data); /** * enum ntdb_summary_flags - flags for ntdb_summary. */ enum ntdb_summary_flags { NTDB_SUMMARY_HISTOGRAMS = 1 /* Draw graphs in the summary. 
*/ }; /** * ntdb_summary - return a string describing the NTDB state * @ntdb: the ntdb context returned from ntdb_open() * @flags: flags to control the summary output. * @summary: pointer to string to allocate. * * This returns a developer-readable string describing the overall * state of the ntdb, such as the percentage used and sizes of records. * It is designed to provide information about the ntdb at a glance * without displaying any keys or data in the database. * * On success, sets @summary to point to a malloc()'ed nul-terminated * multi-line string. It is your responsibility to free() it. */ enum NTDB_ERROR ntdb_summary(struct ntdb_context *ntdb, enum ntdb_summary_flags flags, char **summary); /** * ntdb_get_flags - return the flags for a ntdb * @ntdb: the ntdb context returned from ntdb_open() * * This returns the flags on the current ntdb. Some of these are caused by * the flags argument to ntdb_open(), others (such as NTDB_CONVERT) are * intuited. */ unsigned int ntdb_get_flags(struct ntdb_context *ntdb); /** * ntdb_add_flag - set a flag for a ntdb * @ntdb: the ntdb context returned from ntdb_open() * @flag: one of NTDB_NOLOCK, NTDB_NOMMAP, NTDB_NOSYNC or NTDB_ALLOW_NESTING. * * You can use this to set a flag on the NTDB. You cannot set these flags * on a NTDB_INTERNAL ntdb. */ void ntdb_add_flag(struct ntdb_context *ntdb, unsigned flag); /** * ntdb_remove_flag - unset a flag for a ntdb * @ntdb: the ntdb context returned from ntdb_open() * @flag: one of NTDB_NOLOCK, NTDB_NOMMAP, NTDB_NOSYNC or NTDB_ALLOW_NESTING. * * You can use this to clear a flag on the NTDB. You cannot clear flags * on a NTDB_INTERNAL ntdb. */ void ntdb_remove_flag(struct ntdb_context *ntdb, unsigned flag); /** * enum ntdb_attribute_type - descriminator for union ntdb_attribute. 
*/ enum ntdb_attribute_type { NTDB_ATTRIBUTE_LOG = 0, NTDB_ATTRIBUTE_HASH = 1, NTDB_ATTRIBUTE_SEED = 2, NTDB_ATTRIBUTE_STATS = 3, NTDB_ATTRIBUTE_OPENHOOK = 4, NTDB_ATTRIBUTE_FLOCK = 5, NTDB_ATTRIBUTE_ALLOCATOR = 6, NTDB_ATTRIBUTE_HASHSIZE = 7 }; /** * ntdb_get_attribute - get an attribute for an existing ntdb * @ntdb: the ntdb context returned from ntdb_open() * @attr: the union ntdb_attribute to set. * * This gets an attribute from a NTDB which has previously been set (or * may return the default values). Set @attr.base.attr to the * attribute type you want get. */ enum NTDB_ERROR ntdb_get_attribute(struct ntdb_context *ntdb, union ntdb_attribute *attr); /** * ntdb_set_attribute - set an attribute for an existing ntdb * @ntdb: the ntdb context returned from ntdb_open() * @attr: the union ntdb_attribute to set. * * This sets an attribute on a NTDB, overriding any previous attribute * of the same type. It returns NTDB_ERR_EINVAL if the attribute is * unknown or invalid. * * Note that NTDB_ATTRIBUTE_HASH, NTDB_ATTRIBUTE_SEED, and * NTDB_ATTRIBUTE_OPENHOOK cannot currently be set after ntdb_open. */ enum NTDB_ERROR ntdb_set_attribute(struct ntdb_context *ntdb, const union ntdb_attribute *attr); /** * ntdb_unset_attribute - reset an attribute for an existing ntdb * @ntdb: the ntdb context returned from ntdb_open() * @type: the attribute type to unset. * * This unsets an attribute on a NTDB, returning it to the defaults * (where applicable). * * Note that it only makes sense for NTDB_ATTRIBUTE_LOG and NTDB_ATTRIBUTE_FLOCK * to be unset. */ void ntdb_unset_attribute(struct ntdb_context *ntdb, enum ntdb_attribute_type type); /** * ntdb_name - get the name of a ntdb * @ntdb: the ntdb context returned from ntdb_open() * * This returns a copy of the name string, made at ntdb_open() time. * * This is mostly useful for logging. 
*/ const char *ntdb_name(const struct ntdb_context *ntdb); /** * ntdb_fd - get the file descriptor of a ntdb * @ntdb: the ntdb context returned from ntdb_open() * * This returns the file descriptor for the underlying database file, or -1 * for NTDB_INTERNAL. */ int ntdb_fd(const struct ntdb_context *ntdb); /** * ntdb_foreach - iterate through every open NTDB. * @fn: the function to call for every NTDB * @p: the pointer to hand to @fn * * NTDB internally keeps track of all open TDBs; this function allows you to * iterate through them. If @fn returns non-zero, traversal stops. */ #define ntdb_foreach(fn, p) \ ntdb_foreach_(typesafe_cb_preargs(int, void *, (fn), (p), \ struct ntdb_context *), (p)) void ntdb_foreach_(int (*fn)(struct ntdb_context *, void *), void *p); /** * struct ntdb_attribute_base - common fields for all ntdb attributes. */ struct ntdb_attribute_base { enum ntdb_attribute_type attr; union ntdb_attribute *next; }; /** * enum ntdb_log_level - log levels for ntdb_attribute_log * @NTDB_LOG_ERROR: used to log unrecoverable errors such as I/O errors * or internal consistency failures. * @NTDB_LOG_USE_ERROR: used to log usage errors such as invalid parameters * or writing to a read-only database. * @NTDB_LOG_WARNING: used for informational messages on issues which * are unusual but handled by NTDB internally, such * as a failure to mmap or failure to open /dev/urandom. * It's also used when ntdb_open() fails without O_CREAT * because a file does not exist. */ enum ntdb_log_level { NTDB_LOG_ERROR, NTDB_LOG_USE_ERROR, NTDB_LOG_WARNING }; /** * struct ntdb_attribute_log - log function attribute * * This attribute provides a hook for you to log errors. 
*/ struct ntdb_attribute_log { struct ntdb_attribute_base base; /* .attr = NTDB_ATTRIBUTE_LOG */ void (*fn)(struct ntdb_context *ntdb, enum ntdb_log_level level, enum NTDB_ERROR ecode, const char *message, void *data); void *data; }; /** * struct ntdb_attribute_hash - hash function attribute * * This attribute allows you to provide an alternative hash function. * This hash function will be handed keys from the database; it will also * be handed the 8-byte NTDB_HASH_MAGIC value for checking the header (the * ntdb_open() will fail if the hash value doesn't match the header). * * Note that if your hash function gives different results on * different machine endians, your ntdb will no longer work across * different architectures! */ struct ntdb_attribute_hash { struct ntdb_attribute_base base; /* .attr = NTDB_ATTRIBUTE_HASH */ uint32_t (*fn)(const void *key, size_t len, uint32_t seed, void *data); void *data; }; /** * struct ntdb_attribute_seed - hash function seed attribute * * The hash function seed is normally taken from /dev/urandom (or equivalent) * but can be set manually here. This is mainly for testing purposes. */ struct ntdb_attribute_seed { struct ntdb_attribute_base base; /* .attr = NTDB_ATTRIBUTE_SEED */ uint64_t seed; }; /** * struct ntdb_attribute_stats - ntdb operational statistics * * This attribute records statistics of various low-level NTDB operations. * This can be used to assist performance evaluation. This is only * useful for ntdb_get_attribute(). * * New fields will be added at the end, hence the "size" argument which * indicates how large your structure is: it must be filled in before * calling ntdb_get_attribute(), which will overwrite it with the size * ntdb knows about. 
*/ struct ntdb_attribute_stats { struct ntdb_attribute_base base; /* .attr = NTDB_ATTRIBUTE_STATS */ size_t size; /* = sizeof(struct ntdb_attribute_stats) */ uint64_t allocs; uint64_t alloc_subhash; uint64_t alloc_chain; uint64_t alloc_bucket_exact; uint64_t alloc_bucket_max; uint64_t alloc_leftover; uint64_t alloc_coalesce_tried; uint64_t alloc_coalesce_iterate_clash; uint64_t alloc_coalesce_lockfail; uint64_t alloc_coalesce_race; uint64_t alloc_coalesce_succeeded; uint64_t alloc_coalesce_num_merged; uint64_t compares; uint64_t compare_wrong_offsetbits; uint64_t compare_wrong_keylen; uint64_t compare_wrong_rechash; uint64_t compare_wrong_keycmp; uint64_t transactions; uint64_t transaction_cancel; uint64_t transaction_nest; uint64_t transaction_expand_file; uint64_t transaction_read_direct; uint64_t transaction_read_direct_fail; uint64_t transaction_write_direct; uint64_t transaction_write_direct_fail; uint64_t traverses; uint64_t traverse_val_vanished; uint64_t expands; uint64_t frees; uint64_t locks; uint64_t lock_lowlevel; uint64_t lock_nonblock; uint64_t lock_nonblock_fail; }; /** * struct ntdb_attribute_openhook - ntdb special effects hook for open * * This attribute contains a function to call once we have the OPEN_LOCK * for the ntdb, but before we've examined its contents. If this succeeds, * the ntdb will be populated if it's then zero-length. * * This is a hack to allow support for TDB-style TDB_CLEAR_IF_FIRST * behaviour. */ struct ntdb_attribute_openhook { struct ntdb_attribute_base base; /* .attr = NTDB_ATTRIBUTE_OPENHOOK */ enum NTDB_ERROR (*fn)(int fd, void *data); void *data; }; /** * struct ntdb_attribute_flock - ntdb special effects hook for file locking * * This attribute contains function to call to place locks on a file; it can * be used to support non-blocking operations or lock proxying. * * They should return 0 on success, -1 on failure and set errno. 
* * An error will be logged on error if errno is neither EAGAIN nor EINTR * (normally it would only return EAGAIN if waitflag is false, and * loop internally on EINTR). */ struct ntdb_attribute_flock { struct ntdb_attribute_base base; /* .attr = NTDB_ATTRIBUTE_FLOCK */ int (*lock)(int fd,int rw, off_t off, off_t len, bool waitflag, void *); int (*unlock)(int fd, int rw, off_t off, off_t len, void *); void *data; }; /** * struct ntdb_attribute_hashsize - ntdb hashsize setting. * * This attribute is only settable on ntdb_open; it indicates that we create * a hashtable of the given size, rather than the default. */ struct ntdb_attribute_hashsize { struct ntdb_attribute_base base; /* .attr = NTDB_ATTRIBUTE_HASHSIZE */ uint32_t size; }; /** * struct ntdb_attribute_allocator - allocator for ntdb to use. * * You can replace malloc/free with your own allocation functions. * The allocator takes an "owner" pointer, which is either NULL (for * the initial struct ntdb_context and struct ntdb_file), or a * previously allocated pointer. This is useful for relationship * tracking, such as the talloc library. * * The expand function is realloc, but only ever used to expand an * existing allocation. * * Be careful mixing allocators: two ntdb_contexts which have the same file * open will share the same struct ntdb_file. This may be allocated by one * ntdb's allocator, and freed by the other. */ struct ntdb_attribute_allocator { struct ntdb_attribute_base base; /* .attr = NTDB_ATTRIBUTE_ALLOCATOR */ void *(*alloc)(const void *owner, size_t len, void *priv_data); void *(*expand)(void *old, size_t newlen, void *priv_data); void (*free)(void *old, void *priv_data); void *priv_data; }; /** * union ntdb_attribute - ntdb attributes. * * This represents all the known attributes. 
* * See also: * struct ntdb_attribute_log, struct ntdb_attribute_hash, * struct ntdb_attribute_seed, struct ntdb_attribute_stats, * struct ntdb_attribute_openhook, struct ntdb_attribute_flock, * struct ntdb_attribute_allocator alloc. */ union ntdb_attribute { struct ntdb_attribute_base base; struct ntdb_attribute_log log; struct ntdb_attribute_hash hash; struct ntdb_attribute_seed seed; struct ntdb_attribute_stats stats; struct ntdb_attribute_openhook openhook; struct ntdb_attribute_flock flock; struct ntdb_attribute_allocator alloc; struct ntdb_attribute_hashsize hashsize; }; #ifdef __cplusplus } #endif #endif /* ntdb.h */ ntdb-1.0/ntdb.pc.in000066400000000000000000000003721224151530700141630ustar00rootroot00000000000000prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ includedir=@includedir@ Name: ntdb Description: A (not-so) trivial database Version: @PACKAGE_VERSION@ Libs: @LIB_RPATH@ -L${libdir} -lntdb Cflags: -I${includedir} URL: http://tdb.samba.org/ ntdb-1.0/open.c000066400000000000000000000572741224151530700134250ustar00rootroot00000000000000 /* Trivial Database 2: opening and closing TDBs Copyright (C) Rusty Russell 2010 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "private.h" #include /* all tdbs, to detect double-opens (fcntl file don't nest!) 
*/ static struct ntdb_context *tdbs = NULL; static struct ntdb_file *find_file(dev_t device, ino_t ino) { struct ntdb_context *i; for (i = tdbs; i; i = i->next) { if (i->file->device == device && i->file->inode == ino) { i->file->refcnt++; return i->file; } } return NULL; } static bool read_all(int fd, void *buf, size_t len) { while (len) { ssize_t ret; ret = read(fd, buf, len); if (ret < 0) return false; if (ret == 0) { /* ETOOSHORT? */ errno = EWOULDBLOCK; return false; } buf = (char *)buf + ret; len -= ret; } return true; } static uint32_t random_number(struct ntdb_context *ntdb) { int fd; uint32_t ret = 0; struct timeval now; fd = open("/dev/urandom", O_RDONLY); if (fd >= 0) { if (read_all(fd, &ret, sizeof(ret))) { close(fd); return ret; } close(fd); } /* FIXME: Untested! Based on Wikipedia protocol description! */ fd = open("/dev/egd-pool", O_RDWR); if (fd >= 0) { /* Command is 1, next byte is size we want to read. */ char cmd[2] = { 1, sizeof(uint32_t) }; if (write(fd, cmd, sizeof(cmd)) == sizeof(cmd)) { char reply[1 + sizeof(uint32_t)]; int r = read(fd, reply, sizeof(reply)); if (r > 1) { /* Copy at least some bytes. */ memcpy(&ret, reply+1, r - 1); if (reply[0] == sizeof(uint32_t) && r == sizeof(reply)) { close(fd); return ret; } } } close(fd); } /* Fallback: pid and time. 
*/ gettimeofday(&now, NULL); ret = getpid() * 100132289ULL + now.tv_sec * 1000000ULL + now.tv_usec; ntdb_logerr(ntdb, NTDB_SUCCESS, NTDB_LOG_WARNING, "ntdb_open: random from getpid and time"); return ret; } static void ntdb_context_init(struct ntdb_context *ntdb) { /* Initialize the NTDB fields here */ ntdb_io_init(ntdb); ntdb->transaction = NULL; ntdb->access = NULL; } /* initialise a new database: * * struct ntdb_header; * struct { * struct ntdb_used_record hash_header; * ntdb_off_t hash_buckets[1 << ntdb->hash_bits]; * } hash; * struct ntdb_freetable ftable; * struct { * struct ntdb_free_record free_header; * char forty_three[...]; * } remainder; */ #define NEW_DATABASE_HDR_SIZE(hbits) \ (sizeof(struct ntdb_header) \ + sizeof(struct ntdb_used_record) + (sizeof(ntdb_off_t) << hbits) \ + sizeof(struct ntdb_freetable) \ + sizeof(struct ntdb_free_record)) static enum NTDB_ERROR ntdb_new_database(struct ntdb_context *ntdb, struct ntdb_attribute_seed *seed, struct ntdb_header *rhdr) { /* We make it up in memory, then write it out if not internal */ struct ntdb_freetable *ftable; struct ntdb_used_record *htable; struct ntdb_header *hdr; struct ntdb_free_record *remainder; char *mem; unsigned int magic_len; ssize_t rlen; size_t dbsize, hashsize, hdrsize, remaindersize; enum NTDB_ERROR ecode; hashsize = sizeof(ntdb_off_t) << ntdb->hash_bits; /* Always make db a multiple of NTDB_PGSIZE */ hdrsize = NEW_DATABASE_HDR_SIZE(ntdb->hash_bits); dbsize = (hdrsize + NTDB_PGSIZE-1) & ~(NTDB_PGSIZE-1); mem = ntdb->alloc_fn(ntdb, dbsize, ntdb->alloc_data); if (!mem) { return ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "ntdb_new_database: failed to allocate"); } hdr = (void *)mem; htable = (void *)(mem + sizeof(*hdr)); ftable = (void *)(mem + sizeof(*hdr) + sizeof(*htable) + hashsize); remainder = (void *)(mem + sizeof(*hdr) + sizeof(*htable) + hashsize + sizeof(*ftable)); /* Fill in the header */ hdr->version = NTDB_VERSION; if (seed) hdr->hash_seed = seed->seed; else 
hdr->hash_seed = random_number(ntdb); hdr->hash_test = NTDB_HASH_MAGIC; hdr->hash_test = ntdb->hash_fn(&hdr->hash_test, sizeof(hdr->hash_test), hdr->hash_seed, ntdb->hash_data); hdr->hash_bits = ntdb->hash_bits; hdr->recovery = 0; hdr->features_used = hdr->features_offered = NTDB_FEATURE_MASK; hdr->seqnum = 0; hdr->capabilities = 0; memset(hdr->reserved, 0, sizeof(hdr->reserved)); /* Hash is all zero after header. */ set_header(NULL, htable, NTDB_HTABLE_MAGIC, 0, hashsize, hashsize); memset(htable + 1, 0, hashsize); /* Free is empty. */ hdr->free_table = (char *)ftable - (char *)hdr; memset(ftable, 0, sizeof(*ftable)); ecode = set_header(NULL, &ftable->hdr, NTDB_FTABLE_MAGIC, 0, sizeof(*ftable) - sizeof(ftable->hdr), sizeof(*ftable) - sizeof(ftable->hdr)); if (ecode != NTDB_SUCCESS) { goto out; } /* Rest of database is a free record, containing junk. */ remaindersize = dbsize - hdrsize; remainder->ftable_and_len = (remaindersize + sizeof(*remainder) - sizeof(struct ntdb_used_record)); remainder->next = 0; remainder->magic_and_prev = (NTDB_FREE_MAGIC << (64-NTDB_OFF_UPPER_STEAL)) | ((char *)remainder - (char *)hdr); memset(remainder + 1, 0x43, remaindersize); /* Put in our single free entry. */ ftable->buckets[size_to_bucket(remaindersize)] = (char *)remainder - (char *)hdr; /* Magic food */ memset(hdr->magic_food, 0, sizeof(hdr->magic_food)); strcpy(hdr->magic_food, NTDB_MAGIC_FOOD); /* This creates an endian-converted database, as if read from disk */ magic_len = sizeof(hdr->magic_food); ntdb_convert(ntdb, (char *)hdr + magic_len, hdrsize - magic_len); /* Return copy of header. 
*/ *rhdr = *hdr; if (ntdb->flags & NTDB_INTERNAL) { ntdb->file->map_size = dbsize; ntdb->file->map_ptr = hdr; return NTDB_SUCCESS; } if (lseek(ntdb->file->fd, 0, SEEK_SET) == -1) { ecode = ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_new_database:" " failed to seek: %s", strerror(errno)); goto out; } if (ftruncate(ntdb->file->fd, 0) == -1) { ecode = ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_new_database:" " failed to truncate: %s", strerror(errno)); goto out; } rlen = write(ntdb->file->fd, hdr, dbsize); if (rlen != dbsize) { if (rlen >= 0) errno = ENOSPC; ecode = ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_new_database: %zi writing header: %s", rlen, strerror(errno)); goto out; } out: ntdb->free_fn(hdr, ntdb->alloc_data); return ecode; } static enum NTDB_ERROR ntdb_new_file(struct ntdb_context *ntdb) { ntdb->file = ntdb->alloc_fn(NULL, sizeof(*ntdb->file), ntdb->alloc_data); if (!ntdb->file) return ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "ntdb_open: cannot alloc ntdb_file structure"); ntdb->file->num_lockrecs = 0; ntdb->file->lockrecs = NULL; ntdb->file->allrecord_lock.count = 0; ntdb->file->refcnt = 1; ntdb->file->map_ptr = NULL; ntdb->file->direct_count = 0; ntdb->file->old_mmaps = NULL; return NTDB_SUCCESS; } _PUBLIC_ enum NTDB_ERROR ntdb_set_attribute(struct ntdb_context *ntdb, const union ntdb_attribute *attr) { switch (attr->base.attr) { case NTDB_ATTRIBUTE_LOG: ntdb->log_fn = attr->log.fn; ntdb->log_data = attr->log.data; break; case NTDB_ATTRIBUTE_HASH: case NTDB_ATTRIBUTE_SEED: case NTDB_ATTRIBUTE_OPENHOOK: case NTDB_ATTRIBUTE_HASHSIZE: return ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_set_attribute:" " cannot set %s after opening", attr->base.attr == NTDB_ATTRIBUTE_HASH ? "NTDB_ATTRIBUTE_HASH" : attr->base.attr == NTDB_ATTRIBUTE_SEED ? "NTDB_ATTRIBUTE_SEED" : attr->base.attr == NTDB_ATTRIBUTE_OPENHOOK ? 
"NTDB_ATTRIBUTE_OPENHOOK" : "NTDB_ATTRIBUTE_HASHSIZE"); case NTDB_ATTRIBUTE_STATS: return ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_set_attribute:" " cannot set NTDB_ATTRIBUTE_STATS"); case NTDB_ATTRIBUTE_FLOCK: ntdb->lock_fn = attr->flock.lock; ntdb->unlock_fn = attr->flock.unlock; ntdb->lock_data = attr->flock.data; break; case NTDB_ATTRIBUTE_ALLOCATOR: ntdb->alloc_fn = attr->alloc.alloc; ntdb->expand_fn = attr->alloc.expand; ntdb->free_fn = attr->alloc.free; ntdb->alloc_data = attr->alloc.priv_data; break; default: return ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_set_attribute:" " unknown attribute type %u", attr->base.attr); } return NTDB_SUCCESS; } _PUBLIC_ enum NTDB_ERROR ntdb_get_attribute(struct ntdb_context *ntdb, union ntdb_attribute *attr) { switch (attr->base.attr) { case NTDB_ATTRIBUTE_LOG: if (!ntdb->log_fn) return NTDB_ERR_NOEXIST; attr->log.fn = ntdb->log_fn; attr->log.data = ntdb->log_data; break; case NTDB_ATTRIBUTE_HASH: attr->hash.fn = ntdb->hash_fn; attr->hash.data = ntdb->hash_data; break; case NTDB_ATTRIBUTE_SEED: attr->seed.seed = ntdb->hash_seed; break; case NTDB_ATTRIBUTE_OPENHOOK: if (!ntdb->openhook) return NTDB_ERR_NOEXIST; attr->openhook.fn = ntdb->openhook; attr->openhook.data = ntdb->openhook_data; break; case NTDB_ATTRIBUTE_STATS: { size_t size = attr->stats.size; if (size > ntdb->stats.size) size = ntdb->stats.size; memcpy(&attr->stats, &ntdb->stats, size); break; } case NTDB_ATTRIBUTE_FLOCK: attr->flock.lock = ntdb->lock_fn; attr->flock.unlock = ntdb->unlock_fn; attr->flock.data = ntdb->lock_data; break; case NTDB_ATTRIBUTE_ALLOCATOR: attr->alloc.alloc = ntdb->alloc_fn; attr->alloc.expand = ntdb->expand_fn; attr->alloc.free = ntdb->free_fn; attr->alloc.priv_data = ntdb->alloc_data; break; case NTDB_ATTRIBUTE_HASHSIZE: attr->hashsize.size = 1 << ntdb->hash_bits; break; default: return ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_get_attribute:" " unknown attribute type %u", 
attr->base.attr); } attr->base.next = NULL; return NTDB_SUCCESS; } _PUBLIC_ void ntdb_unset_attribute(struct ntdb_context *ntdb, enum ntdb_attribute_type type) { switch (type) { case NTDB_ATTRIBUTE_LOG: ntdb->log_fn = NULL; break; case NTDB_ATTRIBUTE_OPENHOOK: ntdb->openhook = NULL; break; case NTDB_ATTRIBUTE_HASH: case NTDB_ATTRIBUTE_SEED: ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_unset_attribute: cannot unset %s after opening", type == NTDB_ATTRIBUTE_HASH ? "NTDB_ATTRIBUTE_HASH" : "NTDB_ATTRIBUTE_SEED"); break; case NTDB_ATTRIBUTE_STATS: ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_unset_attribute:" "cannot unset NTDB_ATTRIBUTE_STATS"); break; case NTDB_ATTRIBUTE_FLOCK: ntdb->lock_fn = ntdb_fcntl_lock; ntdb->unlock_fn = ntdb_fcntl_unlock; break; default: ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_unset_attribute: unknown attribute type %u", type); } } /* The top three bits of the capability tell us whether it matters. */ enum NTDB_ERROR unknown_capability(struct ntdb_context *ntdb, const char *caller, ntdb_off_t type) { if (type & NTDB_CAP_NOOPEN) { return ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "%s: file has unknown capability %llu", caller, type & NTDB_CAP_NOOPEN); } if ((type & NTDB_CAP_NOWRITE) && !(ntdb->flags & NTDB_RDONLY)) { return ntdb_logerr(ntdb, NTDB_ERR_RDONLY, NTDB_LOG_ERROR, "%s: file has unknown capability %llu" " (cannot write to it)", caller, type & NTDB_CAP_NOOPEN); } if (type & NTDB_CAP_NOCHECK) { ntdb->flags |= NTDB_CANT_CHECK; } return NTDB_SUCCESS; } static enum NTDB_ERROR capabilities_ok(struct ntdb_context *ntdb, ntdb_off_t capabilities) { ntdb_off_t off, next; enum NTDB_ERROR ecode = NTDB_SUCCESS; const struct ntdb_capability *cap; /* Check capability list. 
*/ for (off = capabilities; off && ecode == NTDB_SUCCESS; off = next) { cap = ntdb_access_read(ntdb, off, sizeof(*cap), true); if (NTDB_PTR_IS_ERR(cap)) { return NTDB_PTR_ERR(cap); } switch (cap->type & NTDB_CAP_TYPE_MASK) { /* We don't understand any capabilities (yet). */ default: ecode = unknown_capability(ntdb, "ntdb_open", cap->type); } next = cap->next; ntdb_access_release(ntdb, cap); } return ecode; } static void *default_alloc(const void *owner, size_t len, void *priv_data) { return malloc(len); } static void *default_expand(void *ptr, size_t len, void *priv_data) { return realloc(ptr, len); } static void default_free(void *ptr, void *priv_data) { free(ptr); } /* First allocation needs manual search of attributes. */ static struct ntdb_context *alloc_ntdb(const union ntdb_attribute *attr, const char *name) { size_t len = sizeof(struct ntdb_context) + strlen(name) + 1; while (attr) { if (attr->base.attr == NTDB_ATTRIBUTE_ALLOCATOR) { return attr->alloc.alloc(NULL, len, attr->alloc.priv_data); } attr = attr->base.next; } return default_alloc(NULL, len, NULL); } static unsigned int next_pow2(uint64_t size) { unsigned int bits = 1; while ((1ULL << bits) < size) bits++; return bits; } _PUBLIC_ struct ntdb_context *ntdb_open(const char *name, int ntdb_flags, int open_flags, mode_t mode, union ntdb_attribute *attr) { struct ntdb_context *ntdb; struct stat st; int saved_errno = 0; uint64_t hash_test; unsigned v; ssize_t rlen; struct ntdb_header hdr; struct ntdb_attribute_seed *seed = NULL; ntdb_bool_err berr; enum NTDB_ERROR ecode; int openlock; ntdb = alloc_ntdb(attr, name); if (!ntdb) { /* Can't log this */ errno = ENOMEM; return NULL; } /* Set name immediately for logging functions. 
*/ ntdb->name = strcpy((char *)(ntdb + 1), name); ntdb->flags = ntdb_flags; ntdb->log_fn = NULL; ntdb->open_flags = open_flags; ntdb->file = NULL; ntdb->openhook = NULL; ntdb->lock_fn = ntdb_fcntl_lock; ntdb->unlock_fn = ntdb_fcntl_unlock; ntdb->hash_fn = ntdb_jenkins_hash; memset(&ntdb->stats, 0, sizeof(ntdb->stats)); ntdb->stats.base.attr = NTDB_ATTRIBUTE_STATS; ntdb->stats.size = sizeof(ntdb->stats); ntdb->alloc_fn = default_alloc; ntdb->expand_fn = default_expand; ntdb->free_fn = default_free; ntdb->hash_bits = NTDB_DEFAULT_HBITS; /* 64k of hash by default. */ while (attr) { switch (attr->base.attr) { case NTDB_ATTRIBUTE_HASH: ntdb->hash_fn = attr->hash.fn; ntdb->hash_data = attr->hash.data; break; case NTDB_ATTRIBUTE_SEED: seed = &attr->seed; break; case NTDB_ATTRIBUTE_OPENHOOK: ntdb->openhook = attr->openhook.fn; ntdb->openhook_data = attr->openhook.data; break; case NTDB_ATTRIBUTE_HASHSIZE: ntdb->hash_bits = next_pow2(attr->hashsize.size); if (ntdb->hash_bits > 31) { ecode = ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_open: hash_size %u" " too large", attr->hashsize.size); goto fail; } break; default: /* These are set as normal. 
*/ ecode = ntdb_set_attribute(ntdb, attr); if (ecode != NTDB_SUCCESS) goto fail; } attr = attr->base.next; } if (ntdb_flags & ~(NTDB_INTERNAL | NTDB_NOLOCK | NTDB_NOMMAP | NTDB_CONVERT | NTDB_NOSYNC | NTDB_SEQNUM | NTDB_ALLOW_NESTING | NTDB_RDONLY)) { ecode = ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_open: unknown flags %u", ntdb_flags); goto fail; } if (seed) { if (!(ntdb_flags & NTDB_INTERNAL) && !(open_flags & O_CREAT)) { ecode = ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_open:" " cannot set NTDB_ATTRIBUTE_SEED" " without O_CREAT."); goto fail; } } if ((open_flags & O_ACCMODE) == O_WRONLY) { ecode = ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_open: can't open ntdb %s write-only", name); goto fail; } if ((open_flags & O_ACCMODE) == O_RDONLY) { openlock = F_RDLCK; ntdb->flags |= NTDB_RDONLY; } else { if (ntdb_flags & NTDB_RDONLY) { ecode = ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_open: can't use NTDB_RDONLY" " without O_RDONLY"); goto fail; } openlock = F_WRLCK; } /* internal databases don't need any of the rest. */ if (ntdb->flags & NTDB_INTERNAL) { ntdb->flags |= (NTDB_NOLOCK | NTDB_NOMMAP); ecode = ntdb_new_file(ntdb); if (ecode != NTDB_SUCCESS) { goto fail; } ntdb->file->fd = -1; ecode = ntdb_new_database(ntdb, seed, &hdr); if (ecode == NTDB_SUCCESS) { ntdb_convert(ntdb, &hdr.hash_seed, sizeof(hdr.hash_seed)); ntdb->hash_seed = hdr.hash_seed; ntdb_context_init(ntdb); ntdb_ftable_init(ntdb); } if (ecode != NTDB_SUCCESS) { goto fail; } return ntdb; } if (stat(name, &st) != -1) ntdb->file = find_file(st.st_dev, st.st_ino); if (!ntdb->file) { ecode = ntdb_new_file(ntdb); if (ecode != NTDB_SUCCESS) { goto fail; } /* Set this now, as ntdb_nest_lock examines it. */ ntdb->file->map_size = 0; if ((ntdb->file->fd = open(name, open_flags, mode)) == -1) { enum ntdb_log_level lvl; /* errno set by open(2) */ saved_errno = errno; /* Probing for files like this is a common pattern. 
*/ if (!(open_flags & O_CREAT) && errno == ENOENT) { lvl = NTDB_LOG_WARNING; } else { lvl = NTDB_LOG_ERROR; } ntdb_logerr(ntdb, NTDB_ERR_IO, lvl, "ntdb_open: could not open file %s: %s", name, strerror(errno)); goto fail_errno; } /* ensure there is only one process initialising at once: * do it immediately to reduce the create/openlock race. */ ecode = ntdb_lock_open(ntdb, openlock, NTDB_LOCK_WAIT|NTDB_LOCK_NOCHECK); if (ecode != NTDB_SUCCESS) { saved_errno = errno; goto fail_errno; } /* on exec, don't inherit the fd */ v = fcntl(ntdb->file->fd, F_GETFD, 0); fcntl(ntdb->file->fd, F_SETFD, v | FD_CLOEXEC); if (fstat(ntdb->file->fd, &st) == -1) { saved_errno = errno; ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_open: could not stat open %s: %s", name, strerror(errno)); goto fail_errno; } ntdb->file->device = st.st_dev; ntdb->file->inode = st.st_ino; /* call their open hook if they gave us one. */ if (ntdb->openhook) { ecode = ntdb->openhook(ntdb->file->fd, ntdb->openhook_data); if (ecode != NTDB_SUCCESS) { ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_open: open hook failed"); goto fail; } open_flags |= O_CREAT; } } else { /* ensure there is only one process initialising at once */ ecode = ntdb_lock_open(ntdb, openlock, NTDB_LOCK_WAIT|NTDB_LOCK_NOCHECK); if (ecode != NTDB_SUCCESS) { saved_errno = errno; goto fail_errno; } } /* If they used O_TRUNC, read will return 0. 
*/ rlen = pread(ntdb->file->fd, &hdr, sizeof(hdr), 0); if (rlen == 0 && (open_flags & O_CREAT)) { ecode = ntdb_new_database(ntdb, seed, &hdr); if (ecode != NTDB_SUCCESS) { goto fail; } } else if (rlen < 0) { ecode = ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_open: error %s reading %s", strerror(errno), name); goto fail; } else if (rlen < sizeof(hdr) || strcmp(hdr.magic_food, NTDB_MAGIC_FOOD) != 0) { ecode = ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_open: %s is not a ntdb file", name); goto fail; } if (hdr.version != NTDB_VERSION) { if (hdr.version == bswap_64(NTDB_VERSION)) ntdb->flags |= NTDB_CONVERT; else { /* wrong version */ ecode = ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_open:" " %s is unknown version 0x%llx", name, (long long)hdr.version); goto fail; } } else if (ntdb->flags & NTDB_CONVERT) { ecode = ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_open:" " %s does not need NTDB_CONVERT", name); goto fail; } ntdb_context_init(ntdb); ntdb_convert(ntdb, &hdr, sizeof(hdr)); ntdb->hash_bits = hdr.hash_bits; ntdb->hash_seed = hdr.hash_seed; hash_test = NTDB_HASH_MAGIC; hash_test = ntdb_hash(ntdb, &hash_test, sizeof(hash_test)); if (hdr.hash_test != hash_test) { /* wrong hash variant */ ecode = ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_open:" " %s uses a different hash function", name); goto fail; } ecode = capabilities_ok(ntdb, hdr.capabilities); if (ecode != NTDB_SUCCESS) { goto fail; } /* Clear any features we don't understand. */ if ((open_flags & O_ACCMODE) != O_RDONLY) { hdr.features_used &= NTDB_FEATURE_MASK; ecode = ntdb_write_convert(ntdb, offsetof(struct ntdb_header, features_used), &hdr.features_used, sizeof(hdr.features_used)); if (ecode != NTDB_SUCCESS) goto fail; } ntdb_unlock_open(ntdb, openlock); /* This makes sure we have current map_size and mmap. 
*/ ecode = ntdb_oob(ntdb, ntdb->file->map_size, 1, true); if (unlikely(ecode != NTDB_SUCCESS)) goto fail; if (ntdb->file->map_size % NTDB_PGSIZE != 0) { ecode = ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_open:" " %s size %llu isn't a multiple of %u", name, (long long)ntdb->file->map_size, NTDB_PGSIZE); goto fail; } /* Now it's fully formed, recover if necessary. */ berr = ntdb_needs_recovery(ntdb); if (unlikely(berr != false)) { if (berr < 0) { ecode = NTDB_OFF_TO_ERR(berr); goto fail; } ecode = ntdb_lock_and_recover(ntdb); if (ecode != NTDB_SUCCESS) { goto fail; } } ecode = ntdb_ftable_init(ntdb); if (ecode != NTDB_SUCCESS) { goto fail; } ntdb->next = tdbs; tdbs = ntdb; return ntdb; fail: /* Map ecode to some logical errno. */ switch (NTDB_ERR_TO_OFF(ecode)) { case NTDB_ERR_TO_OFF(NTDB_ERR_CORRUPT): case NTDB_ERR_TO_OFF(NTDB_ERR_IO): saved_errno = EIO; break; case NTDB_ERR_TO_OFF(NTDB_ERR_LOCK): saved_errno = EWOULDBLOCK; break; case NTDB_ERR_TO_OFF(NTDB_ERR_OOM): saved_errno = ENOMEM; break; case NTDB_ERR_TO_OFF(NTDB_ERR_EINVAL): saved_errno = EINVAL; break; default: saved_errno = EINVAL; break; } fail_errno: #ifdef NTDB_TRACE close(ntdb->tracefd); #endif if (ntdb->file) { ntdb_lock_cleanup(ntdb); if (--ntdb->file->refcnt == 0) { assert(ntdb->file->num_lockrecs == 0); if (ntdb->file->map_ptr) { if (ntdb->flags & NTDB_INTERNAL) { ntdb->free_fn(ntdb->file->map_ptr, ntdb->alloc_data); } else ntdb_munmap(ntdb); } if (ntdb->file->fd != -1 && close(ntdb->file->fd) != 0) ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_open: failed to close ntdb fd" " on error: %s", strerror(errno)); ntdb->free_fn(ntdb->file->lockrecs, ntdb->alloc_data); ntdb->free_fn(ntdb->file, ntdb->alloc_data); } } ntdb->free_fn(ntdb, ntdb->alloc_data); errno = saved_errno; return NULL; } _PUBLIC_ int ntdb_close(struct ntdb_context *ntdb) { int ret = 0; struct ntdb_context **i; ntdb_trace(ntdb, "ntdb_close"); if (ntdb->transaction) { ntdb_transaction_cancel(ntdb); } 
ntdb_lock_cleanup(ntdb); if (--ntdb->file->refcnt == 0) { if (ntdb->file->map_ptr) { if (ntdb->flags & NTDB_INTERNAL) { ntdb->free_fn(ntdb->file->map_ptr, ntdb->alloc_data); } else { ntdb_munmap(ntdb); } } ret = close(ntdb->file->fd); ntdb->free_fn(ntdb->file->lockrecs, ntdb->alloc_data); ntdb->free_fn(ntdb->file, ntdb->alloc_data); } /* Remove from tdbs list */ for (i = &tdbs; *i; i = &(*i)->next) { if (*i == ntdb) { *i = ntdb->next; break; } } #ifdef NTDB_TRACE close(ntdb->tracefd); #endif ntdb->free_fn(ntdb, ntdb->alloc_data); return ret; } _PUBLIC_ void ntdb_foreach_(int (*fn)(struct ntdb_context *, void *), void *p) { struct ntdb_context *i; for (i = tdbs; i; i = i->next) { if (fn(i, p) != 0) break; } } ntdb-1.0/private.h000066400000000000000000000506471224151530700141400ustar00rootroot00000000000000#ifndef NTDB_PRIVATE_H #define NTDB_PRIVATE_H /* Trivial Database 2: private types and prototypes Copyright (C) Rusty Russell 2010 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "config.h" #ifndef HAVE_CCAN #error You need ccan to build ntdb! 
#endif #include "ntdb.h" #include #include #include #ifdef HAVE_LIBREPLACE #include "replace.h" #include "system/filesys.h" #include "system/time.h" #include "system/shmem.h" #include "system/select.h" #include "system/wait.h" #else #include #include #include #include #include #include #include #include #include #include #include #include #endif #include #ifndef TEST_IT #define TEST_IT(cond) #endif /* #define NTDB_TRACE 1 */ #ifndef __STRING #define __STRING(x) #x #endif #ifndef __STRINGSTRING #define __STRINGSTRING(x) __STRING(x) #endif #ifndef __location__ #define __location__ __FILE__ ":" __STRINGSTRING(__LINE__) #endif typedef uint64_t ntdb_len_t; typedef uint64_t ntdb_off_t; #define NTDB_MAGIC_FOOD "NTDB file\n" #define NTDB_VERSION ((uint64_t)(0x26011967 + 7)) #define NTDB_USED_MAGIC ((uint64_t)0x1999) #define NTDB_HTABLE_MAGIC ((uint64_t)0x1888) #define NTDB_CHAIN_MAGIC ((uint64_t)0x1777) #define NTDB_FTABLE_MAGIC ((uint64_t)0x1666) #define NTDB_CAP_MAGIC ((uint64_t)0x1555) #define NTDB_FREE_MAGIC ((uint64_t)0xFE) #define NTDB_HASH_MAGIC (0xA1ABE11A01092008ULL) #define NTDB_RECOVERY_MAGIC (0xf53bc0e7ad124589ULL) #define NTDB_RECOVERY_INVALID_MAGIC (0x0ULL) /* Capability bits. */ #define NTDB_CAP_TYPE_MASK 0x1FFFFFFFFFFFFFFFULL #define NTDB_CAP_NOCHECK 0x8000000000000000ULL #define NTDB_CAP_NOWRITE 0x4000000000000000ULL #define NTDB_CAP_NOOPEN 0x2000000000000000ULL #define NTDB_OFF_IS_ERR(off) unlikely(off >= (ntdb_off_t)(long)NTDB_ERR_LAST) #define NTDB_OFF_TO_ERR(off) ((enum NTDB_ERROR)(long)(off)) #define NTDB_ERR_TO_OFF(ecode) ((ntdb_off_t)(long)(ecode)) /* Packing errors into pointers and v.v. */ #define NTDB_PTR_IS_ERR(ptr) \ unlikely((unsigned long)(ptr) >= (unsigned long)NTDB_ERR_LAST) #define NTDB_PTR_ERR(p) ((enum NTDB_ERROR)(long)(p)) #define NTDB_ERR_PTR(err) ((void *)(long)(err)) /* This doesn't really need to be pagesize, but we use it for similar * reasons. */ #define NTDB_PGSIZE 16384 /* Common case of returning true, false or -ve error. 
*/ typedef int ntdb_bool_err; /* Prevent others from opening the file. */ #define NTDB_OPEN_LOCK 0 /* Expanding file. */ #define NTDB_EXPANSION_LOCK 2 /* Doing a transaction. */ #define NTDB_TRANSACTION_LOCK 8 /* Hash chain locks. */ #define NTDB_HASH_LOCK_START 64 /* Extend file by least 100 times larger than needed. */ #define NTDB_EXTENSION_FACTOR 100 /* We steal this many upper bits, giving a maximum offset of 64 exabytes. */ #define NTDB_OFF_UPPER_STEAL 8 /* And we use the lower bit, too. */ #define NTDB_OFF_CHAIN_BIT 0 /* Hash table sits just after the header. */ #define NTDB_HASH_OFFSET (sizeof(struct ntdb_header)) /* Additional features we understand. Currently: none. */ #define NTDB_FEATURE_MASK ((uint64_t)0) /* The bit number where we store the extra hash bits. */ /* Convenience mask to get actual offset. */ #define NTDB_OFF_MASK \ (((1ULL << (64 - NTDB_OFF_UPPER_STEAL)) - 1) - (1<magic_and_meta >> 43) & ((1 << 5)-1)) * 2; } static inline uint64_t rec_key_length(const struct ntdb_used_record *r) { return r->key_and_data_len & ((1ULL << rec_key_bits(r)) - 1); } static inline uint64_t rec_data_length(const struct ntdb_used_record *r) { return r->key_and_data_len >> rec_key_bits(r); } static inline uint64_t rec_extra_padding(const struct ntdb_used_record *r) { return (r->magic_and_meta >> 11) & 0xFFFFFFFF; } static inline uint16_t rec_magic(const struct ntdb_used_record *r) { return (r->magic_and_meta >> 48); } struct ntdb_free_record { uint64_t magic_and_prev; /* NTDB_OFF_UPPER_STEAL bits magic, then prev */ uint64_t ftable_and_len; /* Len not counting these two fields. */ /* This is why the minimum record size is 8 bytes. 
*/ uint64_t next; }; static inline uint64_t frec_prev(const struct ntdb_free_record *f) { return f->magic_and_prev & ((1ULL << (64 - NTDB_OFF_UPPER_STEAL)) - 1); } static inline uint64_t frec_magic(const struct ntdb_free_record *f) { return f->magic_and_prev >> (64 - NTDB_OFF_UPPER_STEAL); } static inline uint64_t frec_len(const struct ntdb_free_record *f) { return f->ftable_and_len & ((1ULL << (64 - NTDB_OFF_UPPER_STEAL))-1); } static inline unsigned frec_ftable(const struct ntdb_free_record *f) { return f->ftable_and_len >> (64 - NTDB_OFF_UPPER_STEAL); } struct ntdb_recovery_record { uint64_t magic; /* Length of record (add this header to get total length). */ uint64_t max_len; /* Length used. */ uint64_t len; /* Old length of file before transaction. */ uint64_t eof; }; /* this is stored at the front of every database */ struct ntdb_header { char magic_food[64]; /* for /etc/magic */ /* FIXME: Make me 32 bit? */ uint64_t version; /* version of the code */ uint64_t hash_bits; /* bits for toplevel hash table. */ uint64_t hash_test; /* result of hashing HASH_MAGIC. */ uint64_t hash_seed; /* "random" seed written at creation time. */ ntdb_off_t free_table; /* (First) free table. */ ntdb_off_t recovery; /* Transaction recovery area. */ uint64_t features_used; /* Features all writers understand */ uint64_t features_offered; /* Features offered */ uint64_t seqnum; /* Sequence number for NTDB_SEQNUM */ ntdb_off_t capabilities; /* Optional linked list of capabilities. */ ntdb_off_t reserved[22]; /* * Hash table is next: * * struct ntdb_used_record htable_hdr; * ntdb_off_t htable[1 << hash_bits]; */ }; struct ntdb_freetable { struct ntdb_used_record hdr; ntdb_off_t next; ntdb_off_t buckets[NTDB_FREE_BUCKETS]; }; struct ntdb_capability { struct ntdb_used_record hdr; ntdb_off_t type; ntdb_off_t next; /* ... */ }; /* Information about a particular (locked) hash entry. */ struct hash_info { /* Full hash value of entry. */ uint32_t h; /* Start of hash table / chain. 
*/ ntdb_off_t table; /* Number of entries in this table/chain. */ ntdb_off_t table_size; /* Bucket we (or an empty space) were found in. */ ntdb_off_t bucket; /* Old value that was in that entry (if not found) */ ntdb_off_t old_val; }; enum ntdb_lock_flags { /* WAIT == F_SETLKW, NOWAIT == F_SETLK */ NTDB_LOCK_NOWAIT = 0, NTDB_LOCK_WAIT = 1, /* If set, don't log an error on failure. */ NTDB_LOCK_PROBE = 2, /* If set, don't check for recovery (used by recovery code). */ NTDB_LOCK_NOCHECK = 4, }; struct ntdb_lock { struct ntdb_context *owner; off_t off; uint32_t count; uint32_t ltype; }; /* This is only needed for ntdb_access_commit, but used everywhere to * simplify. */ struct ntdb_access_hdr { struct ntdb_access_hdr *next; ntdb_off_t off; ntdb_len_t len; bool convert; }; /* mmaps we are keeping around because they are still direct accessed */ struct ntdb_old_mmap { struct ntdb_old_mmap *next; void *map_ptr; ntdb_len_t map_size; }; struct ntdb_file { /* How many are sharing us? */ unsigned int refcnt; /* Mmap (if any), or malloc (for NTDB_INTERNAL). */ void *map_ptr; /* How much space has been mapped (<= current file size) */ ntdb_len_t map_size; /* The file descriptor (-1 for NTDB_INTERNAL). */ int fd; /* How many are accessing directly? */ unsigned int direct_count; /* Old maps, still direct accessed. */ struct ntdb_old_mmap *old_mmaps; /* Lock information */ pid_t locker; struct ntdb_lock allrecord_lock; size_t num_lockrecs; struct ntdb_lock *lockrecs; /* Identity of this file. 
*/ dev_t device; ino_t inode; }; struct ntdb_methods { enum NTDB_ERROR (*tread)(struct ntdb_context *, ntdb_off_t, void *, ntdb_len_t); enum NTDB_ERROR (*twrite)(struct ntdb_context *, ntdb_off_t, const void *, ntdb_len_t); enum NTDB_ERROR (*oob)(struct ntdb_context *, ntdb_off_t, ntdb_len_t, bool); enum NTDB_ERROR (*expand_file)(struct ntdb_context *, ntdb_len_t); void *(*direct)(struct ntdb_context *, ntdb_off_t, size_t, bool); ntdb_off_t (*read_off)(struct ntdb_context *ntdb, ntdb_off_t off); enum NTDB_ERROR (*write_off)(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_off_t val); }; /* internal prototypes */ /* Get bits from a value. */ static inline uint32_t bits_from(uint64_t val, unsigned start, unsigned num) { assert(num <= 32); return (val >> start) & ((1U << num) - 1); } /* hash.c: */ uint32_t ntdb_jenkins_hash(const void *key, size_t length, uint32_t seed, void *unused); enum NTDB_ERROR first_in_hash(struct ntdb_context *ntdb, struct hash_info *h, NTDB_DATA *kbuf, size_t *dlen); enum NTDB_ERROR next_in_hash(struct ntdb_context *ntdb, struct hash_info *h, NTDB_DATA *kbuf, size_t *dlen); /* Hash random memory. */ uint32_t ntdb_hash(struct ntdb_context *ntdb, const void *ptr, size_t len); /* Find and lock a hash entry (or where it would be). 
*/ ntdb_off_t find_and_lock(struct ntdb_context *ntdb, NTDB_DATA key, int ltype, struct hash_info *h, struct ntdb_used_record *rec, const char **rkey); enum NTDB_ERROR replace_in_hash(struct ntdb_context *ntdb, const struct hash_info *h, ntdb_off_t new_off); enum NTDB_ERROR add_to_hash(struct ntdb_context *ntdb, const struct hash_info *h, ntdb_off_t new_off); enum NTDB_ERROR delete_from_hash(struct ntdb_context *ntdb, const struct hash_info *h); /* For ntdb_check */ bool is_subhash(ntdb_off_t val); enum NTDB_ERROR unknown_capability(struct ntdb_context *ntdb, const char *caller, ntdb_off_t type); /* free.c: */ enum NTDB_ERROR ntdb_ftable_init(struct ntdb_context *ntdb); /* check.c needs these to iterate through free lists. */ ntdb_off_t first_ftable(struct ntdb_context *ntdb); ntdb_off_t next_ftable(struct ntdb_context *ntdb, ntdb_off_t ftable); /* This returns space or -ve error number. */ ntdb_off_t alloc(struct ntdb_context *ntdb, size_t keylen, size_t datalen, unsigned magic, bool growing); /* Put this record in a free list. */ enum NTDB_ERROR add_free_record(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_len_t len_with_header, enum ntdb_lock_flags waitflag, bool coalesce_ok); /* Set up header for a used/ftable/htable/chain/capability record. */ enum NTDB_ERROR set_header(struct ntdb_context *ntdb, struct ntdb_used_record *rec, unsigned magic, uint64_t keylen, uint64_t datalen, uint64_t actuallen); /* Used by ntdb_check to verify. */ unsigned int size_to_bucket(ntdb_len_t data_len); ntdb_off_t bucket_off(ntdb_off_t ftable_off, unsigned bucket); /* Used by ntdb_summary */ ntdb_off_t dead_space(struct ntdb_context *ntdb, ntdb_off_t off); /* Adjust expansion, used by create_recovery_area */ ntdb_off_t ntdb_expand_adjust(ntdb_off_t map_size, ntdb_off_t size); /* io.c: */ /* Initialize ntdb->methods. */ void ntdb_io_init(struct ntdb_context *ntdb); /* Convert endian of the buffer if required. 
*/ void *ntdb_convert(const struct ntdb_context *ntdb, void *buf, ntdb_len_t size); /* Unmap and try to map the ntdb. */ enum NTDB_ERROR ntdb_munmap(struct ntdb_context *ntdb); enum NTDB_ERROR ntdb_mmap(struct ntdb_context *ntdb); /* Either alloc a copy, or give direct access. Release frees or noop. */ const void *ntdb_access_read(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_len_t len, bool convert); void *ntdb_access_write(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_len_t len, bool convert); /* Release result of ntdb_access_read/write. */ void ntdb_access_release(struct ntdb_context *ntdb, const void *p); /* Commit result of ntdb_acces_write. */ enum NTDB_ERROR ntdb_access_commit(struct ntdb_context *ntdb, void *p); /* Clear an ondisk area. */ enum NTDB_ERROR zero_out(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_len_t len); /* Return a non-zero offset between >= start < end in this array (or end). */ ntdb_off_t ntdb_find_nonzero_off(struct ntdb_context *ntdb, ntdb_off_t base, uint64_t start, uint64_t end); /* Return a zero offset in this array, or num. */ ntdb_off_t ntdb_find_zero_off(struct ntdb_context *ntdb, ntdb_off_t off, uint64_t num); /* Allocate and make a copy of some offset. */ void *ntdb_alloc_read(struct ntdb_context *ntdb, ntdb_off_t offset, ntdb_len_t len); /* Writes a converted copy of a record. */ enum NTDB_ERROR ntdb_write_convert(struct ntdb_context *ntdb, ntdb_off_t off, const void *rec, size_t len); /* Reads record and converts it */ enum NTDB_ERROR ntdb_read_convert(struct ntdb_context *ntdb, ntdb_off_t off, void *rec, size_t len); /* Bump the seqnum (caller checks for ntdb->flags & NTDB_SEQNUM) */ void ntdb_inc_seqnum(struct ntdb_context *ntdb); /* lock.c: */ /* Print message because another ntdb owns a lock we want. */ enum NTDB_ERROR owner_conflict(struct ntdb_context *ntdb, const char *call); /* If we fork, we no longer really own locks. 
*/ bool check_lock_pid(struct ntdb_context *ntdb, const char *call, bool log); /* Lock/unlock a hash bucket. */ enum NTDB_ERROR ntdb_lock_hash(struct ntdb_context *ntdb, unsigned int hbucket, int ltype); enum NTDB_ERROR ntdb_unlock_hash(struct ntdb_context *ntdb, unsigned int hash, int ltype); /* For closing the file. */ void ntdb_lock_cleanup(struct ntdb_context *ntdb); /* Lock/unlock a particular free bucket. */ enum NTDB_ERROR ntdb_lock_free_bucket(struct ntdb_context *ntdb, ntdb_off_t b_off, enum ntdb_lock_flags waitflag); void ntdb_unlock_free_bucket(struct ntdb_context *ntdb, ntdb_off_t b_off); /* Serialize transaction start. */ enum NTDB_ERROR ntdb_transaction_lock(struct ntdb_context *ntdb, int ltype); void ntdb_transaction_unlock(struct ntdb_context *ntdb, int ltype); /* Do we have any hash locks (ie. via ntdb_chainlock) ? */ bool ntdb_has_hash_locks(struct ntdb_context *ntdb); /* Lock entire database. */ enum NTDB_ERROR ntdb_allrecord_lock(struct ntdb_context *ntdb, int ltype, enum ntdb_lock_flags flags, bool upgradable); void ntdb_allrecord_unlock(struct ntdb_context *ntdb, int ltype); enum NTDB_ERROR ntdb_allrecord_upgrade(struct ntdb_context *ntdb, off_t start); /* Serialize db open. */ enum NTDB_ERROR ntdb_lock_open(struct ntdb_context *ntdb, int ltype, enum ntdb_lock_flags flags); void ntdb_unlock_open(struct ntdb_context *ntdb, int ltype); bool ntdb_has_open_lock(struct ntdb_context *ntdb); /* Serialize db expand. */ enum NTDB_ERROR ntdb_lock_expand(struct ntdb_context *ntdb, int ltype); void ntdb_unlock_expand(struct ntdb_context *ntdb, int ltype); bool ntdb_has_expansion_lock(struct ntdb_context *ntdb); /* If it needs recovery, grab all the locks and do it. */ enum NTDB_ERROR ntdb_lock_and_recover(struct ntdb_context *ntdb); /* Default lock and unlock functions. 
*/ int ntdb_fcntl_lock(int fd, int rw, off_t off, off_t len, bool waitflag, void *); int ntdb_fcntl_unlock(int fd, int rw, off_t off, off_t len, void *); /* transaction.c: */ enum NTDB_ERROR ntdb_transaction_recover(struct ntdb_context *ntdb); ntdb_bool_err ntdb_needs_recovery(struct ntdb_context *ntdb); struct ntdb_context { /* Single list of all TDBs, to detect multiple opens. */ struct ntdb_context *next; /* Filename of the database. */ const char *name; /* Logging function */ void (*log_fn)(struct ntdb_context *ntdb, enum ntdb_log_level level, enum NTDB_ERROR ecode, const char *message, void *data); void *log_data; /* Open flags passed to ntdb_open. */ int open_flags; /* low level (fnctl) lock functions. */ int (*lock_fn)(int fd, int rw, off_t off, off_t len, bool w, void *); int (*unlock_fn)(int fd, int rw, off_t off, off_t len, void *); void *lock_data; /* the ntdb flags passed to ntdb_open. */ uint32_t flags; /* Our statistics. */ struct ntdb_attribute_stats stats; /* The actual file information */ struct ntdb_file *file; /* Hash function. */ uint32_t (*hash_fn)(const void *key, size_t len, uint32_t seed, void *); void *hash_data; uint32_t hash_seed; /* Bits in toplevel hash table. */ unsigned int hash_bits; /* Allocate and free functions. */ void *(*alloc_fn)(const void *owner, size_t len, void *priv_data); void *(*expand_fn)(void *old, size_t newlen, void *priv_data); void (*free_fn)(void *old, void *priv_data); void *alloc_data; /* Our open hook, if any. */ enum NTDB_ERROR (*openhook)(int fd, void *data); void *openhook_data; /* Set if we are in a transaction. */ struct ntdb_transaction *transaction; /* What free table are we using? */ ntdb_off_t ftable_off; unsigned int ftable; /* IO methods: changes for transactions. 
*/ const struct ntdb_methods *io; /* Direct access information */ struct ntdb_access_hdr *access; }; /* ntdb.c: */ enum NTDB_ERROR COLD PRINTF_FMT(4, 5) ntdb_logerr(struct ntdb_context *ntdb, enum NTDB_ERROR ecode, enum ntdb_log_level level, const char *fmt, ...); static inline enum NTDB_ERROR ntdb_oob(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_len_t len, bool probe) { if (likely(off + len >= off) && likely(off + len <= ntdb->file->map_size) && likely(!probe)) { return NTDB_SUCCESS; } return ntdb->io->oob(ntdb, off, len, probe); } /* Convenience routine to get an offset. */ static inline ntdb_off_t ntdb_read_off(struct ntdb_context *ntdb, ntdb_off_t off) { return ntdb->io->read_off(ntdb, off); } /* Write an offset at an offset. */ static inline enum NTDB_ERROR ntdb_write_off(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_off_t val) { return ntdb->io->write_off(ntdb, off, val); } #ifdef NTDB_TRACE void ntdb_trace(struct ntdb_context *ntdb, const char *op); void ntdb_trace_seqnum(struct ntdb_context *ntdb, uint32_t seqnum, const char *op); void ntdb_trace_open(struct ntdb_context *ntdb, const char *op, unsigned hash_size, unsigned ntdb_flags, unsigned open_flags); void ntdb_trace_ret(struct ntdb_context *ntdb, const char *op, int ret); void ntdb_trace_retrec(struct ntdb_context *ntdb, const char *op, NTDB_DATA ret); void ntdb_trace_1rec(struct ntdb_context *ntdb, const char *op, NTDB_DATA rec); void ntdb_trace_1rec_ret(struct ntdb_context *ntdb, const char *op, NTDB_DATA rec, int ret); void ntdb_trace_1rec_retrec(struct ntdb_context *ntdb, const char *op, NTDB_DATA rec, NTDB_DATA ret); void ntdb_trace_2rec_flag_ret(struct ntdb_context *ntdb, const char *op, NTDB_DATA rec1, NTDB_DATA rec2, unsigned flag, int ret); void ntdb_trace_2rec_retrec(struct ntdb_context *ntdb, const char *op, NTDB_DATA rec1, NTDB_DATA rec2, NTDB_DATA ret); #else #define ntdb_trace(ntdb, op) #define ntdb_trace_seqnum(ntdb, seqnum, op) #define ntdb_trace_open(ntdb, op, hash_size, 
ntdb_flags, open_flags) #define ntdb_trace_ret(ntdb, op, ret) #define ntdb_trace_retrec(ntdb, op, ret) #define ntdb_trace_1rec(ntdb, op, rec) #define ntdb_trace_1rec_ret(ntdb, op, rec, ret) #define ntdb_trace_1rec_retrec(ntdb, op, rec, ret) #define ntdb_trace_2rec_flag_ret(ntdb, op, rec1, rec2, flag, ret) #define ntdb_trace_2rec_retrec(ntdb, op, rec1, rec2, ret) #endif /* !NTDB_TRACE */ #endif ntdb-1.0/pyntdb.c000066400000000000000000000370561224151530700137600ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Python interface to ntdb. Simply modified from tdb version. Copyright (C) 2004-2006 Tim Potter Copyright (C) 2007-2008 Jelmer Vernooij Copyright (C) 2011 Rusty Russell ** NOTE! The following LGPL license applies to the ntdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include #include "replace.h" #include "system/filesys.h" #ifndef Py_RETURN_NONE #define Py_RETURN_NONE return Py_INCREF(Py_None), Py_None #endif /* Include ntdb headers */ #include typedef struct { PyObject_HEAD struct ntdb_context *ctx; bool closed; } PyNtdbObject; staticforward PyTypeObject PyNtdb; static void PyErr_SetTDBError(enum NTDB_ERROR e) { PyErr_SetObject(PyExc_RuntimeError, Py_BuildValue("(i,s)", e, ntdb_errorstr(e))); } static NTDB_DATA PyString_AsNtdb_Data(PyObject *data) { NTDB_DATA ret; ret.dptr = (unsigned char *)PyString_AsString(data); ret.dsize = PyString_Size(data); return ret; } static PyObject *PyString_FromNtdb_Data(NTDB_DATA data) { PyObject *ret = PyString_FromStringAndSize((const char *)data.dptr, data.dsize); free(data.dptr); return ret; } #define PyErr_NTDB_ERROR_IS_ERR_RAISE(ret) \ if (ret != NTDB_SUCCESS) { \ PyErr_SetTDBError(ret); \ return NULL; \ } static void stderr_log(struct ntdb_context *ntdb, enum ntdb_log_level level, enum NTDB_ERROR ecode, const char *message, void *data) { fprintf(stderr, "%s:%s:%s\n", ntdb_name(ntdb), ntdb_errorstr(ecode), message); } static PyObject *py_ntdb_open(PyTypeObject *type, PyObject *args, PyObject *kwargs) { char *name = NULL; int ntdb_flags = NTDB_DEFAULT, flags = O_RDWR, mode = 0600; struct ntdb_context *ctx; PyNtdbObject *ret; union ntdb_attribute logattr; const char *kwnames[] = { "name", "ntdb_flags", "flags", "mode", NULL }; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|siii", cast_const2(char **, kwnames), &name, &ntdb_flags, &flags, &mode)) return NULL; if (name == NULL) { ntdb_flags |= NTDB_INTERNAL; } logattr.log.base.attr = NTDB_ATTRIBUTE_LOG; logattr.log.base.next = NULL; logattr.log.fn = stderr_log; ctx = ntdb_open(name, ntdb_flags, flags, mode, &logattr); if (ctx == NULL) { PyErr_SetFromErrno(PyExc_IOError); return NULL; } ret = PyObject_New(PyNtdbObject, &PyNtdb); if (!ret) { ntdb_close(ctx); return NULL; } ret->ctx = ctx; ret->closed = false; return (PyObject *)ret; } 
static PyObject *obj_transaction_cancel(PyNtdbObject *self) { ntdb_transaction_cancel(self->ctx); Py_RETURN_NONE; } static PyObject *obj_transaction_commit(PyNtdbObject *self) { enum NTDB_ERROR ret = ntdb_transaction_commit(self->ctx); PyErr_NTDB_ERROR_IS_ERR_RAISE(ret); Py_RETURN_NONE; } static PyObject *obj_transaction_prepare_commit(PyNtdbObject *self) { enum NTDB_ERROR ret = ntdb_transaction_prepare_commit(self->ctx); PyErr_NTDB_ERROR_IS_ERR_RAISE(ret); Py_RETURN_NONE; } static PyObject *obj_transaction_start(PyNtdbObject *self) { enum NTDB_ERROR ret = ntdb_transaction_start(self->ctx); PyErr_NTDB_ERROR_IS_ERR_RAISE(ret); Py_RETURN_NONE; } static PyObject *obj_lockall(PyNtdbObject *self) { enum NTDB_ERROR ret = ntdb_lockall(self->ctx); PyErr_NTDB_ERROR_IS_ERR_RAISE(ret); Py_RETURN_NONE; } static PyObject *obj_unlockall(PyNtdbObject *self) { ntdb_unlockall(self->ctx); Py_RETURN_NONE; } static PyObject *obj_lockall_read(PyNtdbObject *self) { enum NTDB_ERROR ret = ntdb_lockall_read(self->ctx); PyErr_NTDB_ERROR_IS_ERR_RAISE(ret); Py_RETURN_NONE; } static PyObject *obj_unlockall_read(PyNtdbObject *self) { ntdb_unlockall_read(self->ctx); Py_RETURN_NONE; } static PyObject *obj_close(PyNtdbObject *self) { int ret; if (self->closed) Py_RETURN_NONE; ret = ntdb_close(self->ctx); self->closed = true; if (ret != 0) { PyErr_SetTDBError(NTDB_ERR_IO); return NULL; } Py_RETURN_NONE; } static PyObject *obj_get(PyNtdbObject *self, PyObject *args) { NTDB_DATA key, data; PyObject *py_key; enum NTDB_ERROR ret; if (!PyArg_ParseTuple(args, "O", &py_key)) return NULL; key = PyString_AsNtdb_Data(py_key); ret = ntdb_fetch(self->ctx, key, &data); if (ret == NTDB_ERR_NOEXIST) Py_RETURN_NONE; PyErr_NTDB_ERROR_IS_ERR_RAISE(ret); return PyString_FromNtdb_Data(data); } static PyObject *obj_append(PyNtdbObject *self, PyObject *args) { NTDB_DATA key, data; PyObject *py_key, *py_data; enum NTDB_ERROR ret; if (!PyArg_ParseTuple(args, "OO", &py_key, &py_data)) return NULL; key = 
PyString_AsNtdb_Data(py_key); data = PyString_AsNtdb_Data(py_data); ret = ntdb_append(self->ctx, key, data); PyErr_NTDB_ERROR_IS_ERR_RAISE(ret); Py_RETURN_NONE; } static PyObject *obj_firstkey(PyNtdbObject *self) { enum NTDB_ERROR ret; NTDB_DATA key; ret = ntdb_firstkey(self->ctx, &key); if (ret == NTDB_ERR_NOEXIST) Py_RETURN_NONE; PyErr_NTDB_ERROR_IS_ERR_RAISE(ret); return PyString_FromNtdb_Data(key); } static PyObject *obj_nextkey(PyNtdbObject *self, PyObject *args) { NTDB_DATA key; PyObject *py_key; enum NTDB_ERROR ret; if (!PyArg_ParseTuple(args, "O", &py_key)) return NULL; /* Malloc here, since ntdb_nextkey frees. */ key.dsize = PyString_Size(py_key); key.dptr = malloc(key.dsize); memcpy(key.dptr, PyString_AsString(py_key), key.dsize); ret = ntdb_nextkey(self->ctx, &key); if (ret == NTDB_ERR_NOEXIST) Py_RETURN_NONE; PyErr_NTDB_ERROR_IS_ERR_RAISE(ret); return PyString_FromNtdb_Data(key); } static PyObject *obj_delete(PyNtdbObject *self, PyObject *args) { NTDB_DATA key; PyObject *py_key; enum NTDB_ERROR ret; if (!PyArg_ParseTuple(args, "O", &py_key)) return NULL; key = PyString_AsNtdb_Data(py_key); ret = ntdb_delete(self->ctx, key); PyErr_NTDB_ERROR_IS_ERR_RAISE(ret); Py_RETURN_NONE; } static PyObject *obj_has_key(PyNtdbObject *self, PyObject *args) { NTDB_DATA key; PyObject *py_key; if (!PyArg_ParseTuple(args, "O", &py_key)) return NULL; key = PyString_AsNtdb_Data(py_key); if (ntdb_exists(self->ctx, key)) return Py_True; return Py_False; } static PyObject *obj_store(PyNtdbObject *self, PyObject *args) { NTDB_DATA key, value; enum NTDB_ERROR ret; int flag = NTDB_REPLACE; PyObject *py_key, *py_value; if (!PyArg_ParseTuple(args, "OO|i", &py_key, &py_value, &flag)) return NULL; key = PyString_AsNtdb_Data(py_key); value = PyString_AsNtdb_Data(py_value); ret = ntdb_store(self->ctx, key, value, flag); PyErr_NTDB_ERROR_IS_ERR_RAISE(ret); Py_RETURN_NONE; } static PyObject *obj_add_flag(PyNtdbObject *self, PyObject *args) { unsigned flag; if (!PyArg_ParseTuple(args, "I", 
&flag)) return NULL; ntdb_add_flag(self->ctx, flag); Py_RETURN_NONE; } static PyObject *obj_remove_flag(PyNtdbObject *self, PyObject *args) { unsigned flag; if (!PyArg_ParseTuple(args, "I", &flag)) return NULL; ntdb_remove_flag(self->ctx, flag); Py_RETURN_NONE; } typedef struct { PyObject_HEAD NTDB_DATA current; bool end; PyNtdbObject *iteratee; } PyNtdbIteratorObject; static PyObject *ntdb_iter_next(PyNtdbIteratorObject *self) { enum NTDB_ERROR e; PyObject *ret; if (self->end) return NULL; ret = PyString_FromStringAndSize((const char *)self->current.dptr, self->current.dsize); e = ntdb_nextkey(self->iteratee->ctx, &self->current); if (e == NTDB_ERR_NOEXIST) self->end = true; else PyErr_NTDB_ERROR_IS_ERR_RAISE(e); return ret; } static void ntdb_iter_dealloc(PyNtdbIteratorObject *self) { Py_DECREF(self->iteratee); PyObject_Del(self); } PyTypeObject PyNtdbIterator = { .tp_name = "Iterator", .tp_basicsize = sizeof(PyNtdbIteratorObject), .tp_iternext = (iternextfunc)ntdb_iter_next, .tp_dealloc = (destructor)ntdb_iter_dealloc, .tp_flags = Py_TPFLAGS_DEFAULT, .tp_iter = PyObject_SelfIter, }; static PyObject *ntdb_object_iter(PyNtdbObject *self) { PyNtdbIteratorObject *ret; enum NTDB_ERROR e; ret = PyObject_New(PyNtdbIteratorObject, &PyNtdbIterator); if (!ret) return NULL; e = ntdb_firstkey(self->ctx, &ret->current); if (e == NTDB_ERR_NOEXIST) { ret->end = true; } else { PyErr_NTDB_ERROR_IS_ERR_RAISE(e); ret->end = false; } ret->iteratee = self; Py_INCREF(self); return (PyObject *)ret; } static PyObject *obj_clear(PyNtdbObject *self) { enum NTDB_ERROR ret = ntdb_wipe_all(self->ctx); PyErr_NTDB_ERROR_IS_ERR_RAISE(ret); Py_RETURN_NONE; } static PyObject *obj_enable_seqnum(PyNtdbObject *self) { ntdb_add_flag(self->ctx, NTDB_SEQNUM); Py_RETURN_NONE; } static PyMethodDef ntdb_object_methods[] = { { "transaction_cancel", (PyCFunction)obj_transaction_cancel, METH_NOARGS, "S.transaction_cancel() -> None\n" "Cancel the currently active transaction." 
}, { "transaction_commit", (PyCFunction)obj_transaction_commit, METH_NOARGS, "S.transaction_commit() -> None\n" "Commit the currently active transaction." }, { "transaction_prepare_commit", (PyCFunction)obj_transaction_prepare_commit, METH_NOARGS, "S.transaction_prepare_commit() -> None\n" "Prepare to commit the currently active transaction" }, { "transaction_start", (PyCFunction)obj_transaction_start, METH_NOARGS, "S.transaction_start() -> None\n" "Start a new transaction." }, { "lock_all", (PyCFunction)obj_lockall, METH_NOARGS, NULL }, { "unlock_all", (PyCFunction)obj_unlockall, METH_NOARGS, NULL }, { "read_lock_all", (PyCFunction)obj_lockall_read, METH_NOARGS, NULL }, { "read_unlock_all", (PyCFunction)obj_unlockall_read, METH_NOARGS, NULL }, { "close", (PyCFunction)obj_close, METH_NOARGS, NULL }, { "get", (PyCFunction)obj_get, METH_VARARGS, "S.get(key) -> value\n" "Fetch a value." }, { "append", (PyCFunction)obj_append, METH_VARARGS, "S.append(key, value) -> None\n" "Append data to an existing key." }, { "firstkey", (PyCFunction)obj_firstkey, METH_NOARGS, "S.firstkey() -> data\n" "Return the first key in this database." }, { "nextkey", (PyCFunction)obj_nextkey, METH_NOARGS, "S.nextkey(key) -> data\n" "Return the next key in this database." }, { "delete", (PyCFunction)obj_delete, METH_VARARGS, "S.delete(key) -> None\n" "Delete an entry." }, { "has_key", (PyCFunction)obj_has_key, METH_VARARGS, "S.has_key(key) -> None\n" "Check whether key exists in this database." }, { "store", (PyCFunction)obj_store, METH_VARARGS, "S.store(key, data, flag=REPLACE) -> None" "Store data." }, { "add_flag", (PyCFunction)obj_add_flag, METH_VARARGS, "S.add_flag(flag) -> None" }, { "remove_flag", (PyCFunction)obj_remove_flag, METH_VARARGS, "S.remove_flag(flag) -> None" }, { "iterkeys", (PyCFunction)ntdb_object_iter, METH_NOARGS, "S.iterkeys() -> iterator" }, { "clear", (PyCFunction)obj_clear, METH_NOARGS, "S.clear() -> None\n" "Wipe the entire database." 
}, { "enable_seqnum", (PyCFunction)obj_enable_seqnum, METH_NOARGS, "S.enable_seqnum() -> None" }, { NULL } }; static PyObject *obj_get_flags(PyNtdbObject *self, void *closure) { return PyInt_FromLong(ntdb_get_flags(self->ctx)); } static PyObject *obj_get_filename(PyNtdbObject *self, void *closure) { return PyString_FromString(ntdb_name(self->ctx)); } static PyObject *obj_get_seqnum(PyNtdbObject *self, void *closure) { return PyInt_FromLong(ntdb_get_seqnum(self->ctx)); } static PyGetSetDef ntdb_object_getsetters[] = { { cast_const(char *, "flags"), (getter)obj_get_flags, NULL, NULL }, { cast_const(char *, "filename"), (getter)obj_get_filename, NULL, cast_const(char *, "The filename of this NTDB file.")}, { cast_const(char *, "seqnum"), (getter)obj_get_seqnum, NULL, NULL }, { NULL } }; static PyObject *ntdb_object_repr(PyNtdbObject *self) { if (ntdb_get_flags(self->ctx) & NTDB_INTERNAL) { return PyString_FromString("Ntdb()"); } else { return PyString_FromFormat("Ntdb('%s')", ntdb_name(self->ctx)); } } static void ntdb_object_dealloc(PyNtdbObject *self) { if (!self->closed) ntdb_close(self->ctx); self->ob_type->tp_free(self); } static PyObject *obj_getitem(PyNtdbObject *self, PyObject *key) { NTDB_DATA tkey, val; enum NTDB_ERROR ret; if (!PyString_Check(key)) { PyErr_SetString(PyExc_TypeError, "Expected string as key"); return NULL; } tkey.dptr = (unsigned char *)PyString_AsString(key); tkey.dsize = PyString_Size(key); ret = ntdb_fetch(self->ctx, tkey, &val); if (ret == NTDB_ERR_NOEXIST) { PyErr_SetString(PyExc_KeyError, "No such NTDB entry"); return NULL; } else { PyErr_NTDB_ERROR_IS_ERR_RAISE(ret); return PyString_FromNtdb_Data(val); } } static int obj_setitem(PyNtdbObject *self, PyObject *key, PyObject *value) { NTDB_DATA tkey, tval; enum NTDB_ERROR ret; if (!PyString_Check(key)) { PyErr_SetString(PyExc_TypeError, "Expected string as key"); return -1; } tkey = PyString_AsNtdb_Data(key); if (value == NULL) { ret = ntdb_delete(self->ctx, tkey); } else { if 
(!PyString_Check(value)) { PyErr_SetString(PyExc_TypeError, "Expected string as value"); return -1; } tval = PyString_AsNtdb_Data(value); ret = ntdb_store(self->ctx, tkey, tval, NTDB_REPLACE); } if (ret != NTDB_SUCCESS) { PyErr_SetTDBError(ret); return -1; } return ret; } static PyMappingMethods ntdb_object_mapping = { .mp_subscript = (binaryfunc)obj_getitem, .mp_ass_subscript = (objobjargproc)obj_setitem, }; static PyTypeObject PyNtdb = { .tp_name = "ntdb.Ntdb", .tp_basicsize = sizeof(PyNtdbObject), .tp_methods = ntdb_object_methods, .tp_getset = ntdb_object_getsetters, .tp_new = py_ntdb_open, .tp_doc = "A NTDB file", .tp_repr = (reprfunc)ntdb_object_repr, .tp_dealloc = (destructor)ntdb_object_dealloc, .tp_as_mapping = &ntdb_object_mapping, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_ITER, .tp_iter = (getiterfunc)ntdb_object_iter, }; static PyMethodDef ntdb_methods[] = { { "open", (PyCFunction)py_ntdb_open, METH_VARARGS|METH_KEYWORDS, "open(name, hash_size=0, ntdb_flags=NTDB_DEFAULT, flags=O_RDWR, mode=0600)\n" "Open a NTDB file." 
}, { NULL } }; void initntdb(void); void initntdb(void) { PyObject *m; if (PyType_Ready(&PyNtdb) < 0) return; if (PyType_Ready(&PyNtdbIterator) < 0) return; m = Py_InitModule3("ntdb", ntdb_methods, "NTDB is a simple key-value database similar to GDBM that supports multiple writers."); if (m == NULL) return; PyModule_AddObject(m, "REPLACE", PyInt_FromLong(NTDB_REPLACE)); PyModule_AddObject(m, "INSERT", PyInt_FromLong(NTDB_INSERT)); PyModule_AddObject(m, "MODIFY", PyInt_FromLong(NTDB_MODIFY)); PyModule_AddObject(m, "DEFAULT", PyInt_FromLong(NTDB_DEFAULT)); PyModule_AddObject(m, "INTERNAL", PyInt_FromLong(NTDB_INTERNAL)); PyModule_AddObject(m, "NOLOCK", PyInt_FromLong(NTDB_NOLOCK)); PyModule_AddObject(m, "NOMMAP", PyInt_FromLong(NTDB_NOMMAP)); PyModule_AddObject(m, "CONVERT", PyInt_FromLong(NTDB_CONVERT)); PyModule_AddObject(m, "NOSYNC", PyInt_FromLong(NTDB_NOSYNC)); PyModule_AddObject(m, "SEQNUM", PyInt_FromLong(NTDB_SEQNUM)); PyModule_AddObject(m, "ALLOW_NESTING", PyInt_FromLong(NTDB_ALLOW_NESTING)); PyModule_AddObject(m, "__docformat__", PyString_FromString("restructuredText")); PyModule_AddObject(m, "__version__", PyString_FromString(PACKAGE_VERSION)); Py_INCREF(&PyNtdb); PyModule_AddObject(m, "Ntdb", (PyObject *)&PyNtdb); Py_INCREF(&PyNtdbIterator); } ntdb-1.0/summary.c000066400000000000000000000236651224151530700141560ustar00rootroot00000000000000 /* Trivial Database 2: human-readable summary code Copyright (C) Rusty Russell 2010 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "private.h" #include #define SUMMARY_FORMAT \ "Size of file/data: %zu/%zu\n" \ "Number of records: %zu\n" \ "Smallest/average/largest keys: %zu/%zu/%zu\n%s" \ "Smallest/average/largest data: %zu/%zu/%zu\n%s" \ "Smallest/average/largest padding: %zu/%zu/%zu\n%s" \ "Number of free records: %zu\n" \ "Smallest/average/largest free records: %zu/%zu/%zu\n%s" \ "Number of uncoalesced records: %zu\n" \ "Smallest/average/largest uncoalesced runs: %zu/%zu/%zu\n%s" \ "Toplevel hash used: %u of %u\n" \ "Number of hashes: %zu\n" \ "Smallest/average/largest hash chains: %zu/%zu/%zu\n%s" \ "Percentage keys/data/padding/free/rechdrs/freehdrs/hashes: %.0f/%.0f/%.0f/%.0f/%.0f/%.0f/%.0f\n" #define BUCKET_SUMMARY_FORMAT_A \ "Free bucket %zu: total entries %zu.\n" \ "Smallest/average/largest length: %zu/%zu/%zu\n%s" #define BUCKET_SUMMARY_FORMAT_B \ "Free bucket %zu-%zu: total entries %zu.\n" \ "Smallest/average/largest length: %zu/%zu/%zu\n%s" #define CAPABILITY_FORMAT \ "Capability %llu%s\n" #define HISTO_WIDTH 70 #define HISTO_HEIGHT 20 static ntdb_off_t count_hash(struct ntdb_context *ntdb, ntdb_off_t hash_off, ntdb_off_t num) { const ntdb_off_t *h; ntdb_off_t i, count = 0; h = ntdb_access_read(ntdb, hash_off, sizeof(*h) * num, true); if (NTDB_PTR_IS_ERR(h)) { return NTDB_ERR_TO_OFF(NTDB_PTR_ERR(h)); } for (i = 0; i < num; i++) count += (h[i] != 0); ntdb_access_release(ntdb, h); return count; } static enum NTDB_ERROR summarize(struct ntdb_context *ntdb, struct tally *ftables, struct tally *fr, struct tally *keys, struct tally *data, struct tally *extra, struct tally *uncoal, struct tally *hashes, size_t *num_caps) { ntdb_off_t off; ntdb_len_t len; ntdb_len_t unc = 0; for (off = sizeof(struct ntdb_header); off < ntdb->file->map_size; off += len) { const union { struct ntdb_used_record u; struct ntdb_free_record f; struct ntdb_recovery_record r; } *p; /* We 
might not be able to get the whole thing. */ p = ntdb_access_read(ntdb, off, sizeof(p->f), true); if (NTDB_PTR_IS_ERR(p)) { return NTDB_PTR_ERR(p); } if (frec_magic(&p->f) != NTDB_FREE_MAGIC) { if (unc > 1) { tally_add(uncoal, unc); unc = 0; } } if (p->r.magic == NTDB_RECOVERY_INVALID_MAGIC || p->r.magic == NTDB_RECOVERY_MAGIC) { len = sizeof(p->r) + p->r.max_len; } else if (frec_magic(&p->f) == NTDB_FREE_MAGIC) { len = frec_len(&p->f); tally_add(fr, len); len += sizeof(p->u); unc++; } else if (rec_magic(&p->u) == NTDB_USED_MAGIC) { len = sizeof(p->u) + rec_key_length(&p->u) + rec_data_length(&p->u) + rec_extra_padding(&p->u); tally_add(keys, rec_key_length(&p->u)); tally_add(data, rec_data_length(&p->u)); tally_add(extra, rec_extra_padding(&p->u)); } else if (rec_magic(&p->u) == NTDB_HTABLE_MAGIC) { ntdb_off_t count = count_hash(ntdb, off + sizeof(p->u), 1 << ntdb->hash_bits); if (NTDB_OFF_IS_ERR(count)) { return NTDB_OFF_TO_ERR(count); } tally_add(hashes, count); tally_add(extra, rec_extra_padding(&p->u)); len = sizeof(p->u) + rec_data_length(&p->u) + rec_extra_padding(&p->u); } else if (rec_magic(&p->u) == NTDB_FTABLE_MAGIC) { len = sizeof(p->u) + rec_data_length(&p->u) + rec_extra_padding(&p->u); tally_add(ftables, rec_data_length(&p->u)); tally_add(extra, rec_extra_padding(&p->u)); } else if (rec_magic(&p->u) == NTDB_CHAIN_MAGIC) { len = sizeof(p->u) + rec_data_length(&p->u) + rec_extra_padding(&p->u); tally_add(hashes, rec_data_length(&p->u)/sizeof(ntdb_off_t)); tally_add(extra, rec_extra_padding(&p->u)); } else if (rec_magic(&p->u) == NTDB_CAP_MAGIC) { len = sizeof(p->u) + rec_data_length(&p->u) + rec_extra_padding(&p->u); (*num_caps)++; } else { len = dead_space(ntdb, off); if (NTDB_OFF_IS_ERR(len)) { return NTDB_OFF_TO_ERR(len); } } ntdb_access_release(ntdb, p); } if (unc) tally_add(uncoal, unc); return NTDB_SUCCESS; } static void add_capabilities(struct ntdb_context *ntdb, char *summary) { ntdb_off_t off, next; const struct ntdb_capability *cap; size_t 
count = 0; /* Append to summary. */ summary += strlen(summary); off = ntdb_read_off(ntdb, offsetof(struct ntdb_header, capabilities)); if (NTDB_OFF_IS_ERR(off)) return; /* Walk capability list. */ for (; off; off = next) { cap = ntdb_access_read(ntdb, off, sizeof(*cap), true); if (NTDB_PTR_IS_ERR(cap)) { break; } count++; sprintf(summary, CAPABILITY_FORMAT, cap->type & NTDB_CAP_TYPE_MASK, /* Noopen? How did we get here? */ (cap->type & NTDB_CAP_NOOPEN) ? " (unopenable)" : ((cap->type & NTDB_CAP_NOWRITE) && (cap->type & NTDB_CAP_NOCHECK)) ? " (uncheckable,read-only)" : (cap->type & NTDB_CAP_NOWRITE) ? " (read-only)" : (cap->type & NTDB_CAP_NOCHECK) ? " (uncheckable)" : ""); summary += strlen(summary); next = cap->next; ntdb_access_release(ntdb, cap); } } _PUBLIC_ enum NTDB_ERROR ntdb_summary(struct ntdb_context *ntdb, enum ntdb_summary_flags flags, char **summary) { ntdb_len_t len; size_t num_caps = 0; struct tally *ftables, *freet, *keys, *data, *extra, *uncoal, *hashes; char *freeg, *keysg, *datag, *extrag, *uncoalg, *hashesg; enum NTDB_ERROR ecode; freeg = keysg = datag = extrag = uncoalg = hashesg = NULL; ecode = ntdb_allrecord_lock(ntdb, F_RDLCK, NTDB_LOCK_WAIT, false); if (ecode != NTDB_SUCCESS) { return ecode; } ecode = ntdb_lock_expand(ntdb, F_RDLCK); if (ecode != NTDB_SUCCESS) { ntdb_allrecord_unlock(ntdb, F_RDLCK); return ecode; } /* Start stats off empty. 
*/ ftables = tally_new(HISTO_HEIGHT); freet = tally_new(HISTO_HEIGHT); keys = tally_new(HISTO_HEIGHT); data = tally_new(HISTO_HEIGHT); extra = tally_new(HISTO_HEIGHT); uncoal = tally_new(HISTO_HEIGHT); hashes = tally_new(HISTO_HEIGHT); if (!ftables || !freet || !keys || !data || !extra || !uncoal || !hashes) { ecode = ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "ntdb_summary: failed to allocate" " tally structures"); goto unlock; } ecode = summarize(ntdb, ftables, freet, keys, data, extra, uncoal, hashes, &num_caps); if (ecode != NTDB_SUCCESS) { goto unlock; } if (flags & NTDB_SUMMARY_HISTOGRAMS) { freeg = tally_histogram(freet, HISTO_WIDTH, HISTO_HEIGHT); keysg = tally_histogram(keys, HISTO_WIDTH, HISTO_HEIGHT); datag = tally_histogram(data, HISTO_WIDTH, HISTO_HEIGHT); extrag = tally_histogram(extra, HISTO_WIDTH, HISTO_HEIGHT); uncoalg = tally_histogram(uncoal, HISTO_WIDTH, HISTO_HEIGHT); hashesg = tally_histogram(hashes, HISTO_WIDTH, HISTO_HEIGHT); } /* 20 is max length of a %llu. */ len = strlen(SUMMARY_FORMAT) + 33*20 + 1 + (freeg ? strlen(freeg) : 0) + (keysg ? strlen(keysg) : 0) + (datag ? strlen(datag) : 0) + (extrag ? strlen(extrag) : 0) + (uncoalg ? strlen(uncoalg) : 0) + (hashesg ? strlen(hashesg) : 0) + num_caps * (strlen(CAPABILITY_FORMAT) + 20 + strlen(" (uncheckable,read-only)")); *summary = ntdb->alloc_fn(ntdb, len, ntdb->alloc_data); if (!*summary) { ecode = ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "ntdb_summary: failed to allocate string"); goto unlock; } sprintf(*summary, SUMMARY_FORMAT, (size_t)ntdb->file->map_size, tally_total(keys, NULL) + tally_total(data, NULL), tally_num(keys), tally_min(keys), tally_mean(keys), tally_max(keys), keysg ? keysg : "", tally_min(data), tally_mean(data), tally_max(data), datag ? datag : "", tally_min(extra), tally_mean(extra), tally_max(extra), extrag ? extrag : "", tally_num(freet), tally_min(freet), tally_mean(freet), tally_max(freet), freeg ? 
freeg : "", tally_total(uncoal, NULL), tally_min(uncoal), tally_mean(uncoal), tally_max(uncoal), uncoalg ? uncoalg : "", (unsigned)count_hash(ntdb, sizeof(struct ntdb_header), 1 << ntdb->hash_bits), 1 << ntdb->hash_bits, tally_num(hashes), tally_min(hashes), tally_mean(hashes), tally_max(hashes), hashesg ? hashesg : "", tally_total(keys, NULL) * 100.0 / ntdb->file->map_size, tally_total(data, NULL) * 100.0 / ntdb->file->map_size, tally_total(extra, NULL) * 100.0 / ntdb->file->map_size, tally_total(freet, NULL) * 100.0 / ntdb->file->map_size, (tally_num(keys) + tally_num(freet) + tally_num(hashes)) * sizeof(struct ntdb_used_record) * 100.0 / ntdb->file->map_size, tally_num(ftables) * sizeof(struct ntdb_freetable) * 100.0 / ntdb->file->map_size, (tally_total(hashes, NULL) * sizeof(ntdb_off_t) + (sizeof(ntdb_off_t) << ntdb->hash_bits)) * 100.0 / ntdb->file->map_size); add_capabilities(ntdb, *summary); unlock: ntdb->free_fn(freeg, ntdb->alloc_data); ntdb->free_fn(keysg, ntdb->alloc_data); ntdb->free_fn(datag, ntdb->alloc_data); ntdb->free_fn(extrag, ntdb->alloc_data); ntdb->free_fn(uncoalg, ntdb->alloc_data); ntdb->free_fn(hashesg, ntdb->alloc_data); ntdb->free_fn(freet, ntdb->alloc_data); ntdb->free_fn(keys, ntdb->alloc_data); ntdb->free_fn(data, ntdb->alloc_data); ntdb->free_fn(extra, ntdb->alloc_data); ntdb->free_fn(uncoal, ntdb->alloc_data); ntdb->free_fn(ftables, ntdb->alloc_data); ntdb->free_fn(hashes, ntdb->alloc_data); ntdb_allrecord_unlock(ntdb, F_RDLCK); ntdb_unlock_expand(ntdb, F_RDLCK); return ecode; } ntdb-1.0/test/000077500000000000000000000000001224151530700132605ustar00rootroot00000000000000ntdb-1.0/test/api-12-store.c000066400000000000000000000031201224151530700155430ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include #include "logging.h" /* We use the same seed which we saw a failure on. 
*/ static uint32_t fixedhash(const void *key, size_t len, uint32_t seed, void *p) { return hash64_stable((const unsigned char *)key, len, *(uint64_t *)p); } int main(int argc, char *argv[]) { unsigned int i, j; struct ntdb_context *ntdb; uint64_t seed = 16014841315512641303ULL; union ntdb_attribute fixed_hattr = { .hash = { .base = { NTDB_ATTRIBUTE_HASH }, .fn = fixedhash, .data = &seed } }; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; NTDB_DATA key = { (unsigned char *)&j, sizeof(j) }; NTDB_DATA data = { (unsigned char *)&j, sizeof(j) }; fixed_hattr.base.next = &tap_log_attr; plan_tests(sizeof(flags) / sizeof(flags[0]) * (1 + 500 * 3) + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-12-store.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &fixed_hattr); ok1(ntdb); if (!ntdb) continue; /* We seemed to lose some keys. * Insert and check they're in there! */ for (j = 0; j < 500; j++) { NTDB_DATA d = { NULL, 0 }; /* Bogus GCC warning */ ok1(ntdb_store(ntdb, key, data, NTDB_REPLACE) == 0); ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(ntdb_deq(d, data)); free(d.dptr); } ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-13-delete.c000066400000000000000000000126121224151530700156600ustar00rootroot00000000000000#include "private.h" // For NTDB_TOPLEVEL_HASH_BITS #include #include #include #include #include "ntdb.h" #include "tap-interface.h" #include "logging.h" /* We rig the hash so adjacent-numbered records always clash. */ static uint32_t clash(const void *key, size_t len, uint32_t seed, void *priv) { return *((const unsigned int *)key) / 2; } /* We use the same seed which we saw a failure on. 
*/ static uint32_t fixedhash(const void *key, size_t len, uint32_t seed, void *p) { return hash64_stable((const unsigned char *)key, len, *(uint64_t *)p); } static bool store_records(struct ntdb_context *ntdb) { int i; NTDB_DATA key = { (unsigned char *)&i, sizeof(i) }; NTDB_DATA d, data = { (unsigned char *)&i, sizeof(i) }; for (i = 0; i < 1000; i++) { if (ntdb_store(ntdb, key, data, NTDB_REPLACE) != 0) return false; ntdb_fetch(ntdb, key, &d); if (!ntdb_deq(d, data)) return false; free(d.dptr); } return true; } static void test_val(struct ntdb_context *ntdb, uint64_t val) { uint64_t v; NTDB_DATA key = { (unsigned char *)&v, sizeof(v) }; NTDB_DATA d, data = { (unsigned char *)&v, sizeof(v) }; /* Insert an entry, then delete it. */ v = val; /* Delete should fail. */ ok1(ntdb_delete(ntdb, key) == NTDB_ERR_NOEXIST); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Insert should succeed. */ ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Delete should succeed. */ ok1(ntdb_delete(ntdb, key) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Re-add it, then add collision. */ ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); v = val + 1; ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Can find both? */ ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(d.dsize == data.dsize); free(d.dptr); v = val; ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(d.dsize == data.dsize); free(d.dptr); /* Delete second one. */ v = val + 1; ok1(ntdb_delete(ntdb, key) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Re-add */ ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Now, try deleting first one. */ v = val; ok1(ntdb_delete(ntdb, key) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Can still find second? */ v = val + 1; ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(d.dsize == data.dsize); free(d.dptr); /* Now, this will be ideally placed. 
*/ v = val + 2; ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* This will collide with both. */ v = val; ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); /* We can still find them all, right? */ ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(d.dsize == data.dsize); free(d.dptr); v = val + 1; ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(d.dsize == data.dsize); free(d.dptr); v = val + 2; ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(d.dsize == data.dsize); free(d.dptr); /* And if we delete val + 1, that val + 2 should not move! */ v = val + 1; ok1(ntdb_delete(ntdb, key) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); v = val; ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(d.dsize == data.dsize); free(d.dptr); v = val + 2; ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(d.dsize == data.dsize); free(d.dptr); /* Delete those two, so we are empty. */ ok1(ntdb_delete(ntdb, key) == 0); v = val; ok1(ntdb_delete(ntdb, key) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); } int main(int argc, char *argv[]) { unsigned int i, j; struct ntdb_context *ntdb; uint64_t seed = 16014841315512641303ULL; union ntdb_attribute clash_hattr = { .hash = { .base = { NTDB_ATTRIBUTE_HASH }, .fn = clash } }; union ntdb_attribute fixed_hattr = { .hash = { .base = { NTDB_ATTRIBUTE_HASH }, .fn = fixedhash, .data = &seed } }; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; /* These two values gave trouble before. */ int vals[] = { 755, 837 }; clash_hattr.base.next = &tap_log_attr; fixed_hattr.base.next = &tap_log_attr; plan_tests(sizeof(flags) / sizeof(flags[0]) * (39 * 3 + 5 + sizeof(vals)/sizeof(vals[0])*2) + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-13-delete.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &clash_hattr); ok1(ntdb); if (!ntdb) continue; /* Check start of hash table. 
*/ test_val(ntdb, 0); /* Check end of hash table. */ test_val(ntdb, -1ULL); /* Check mixed bitpattern. */ test_val(ntdb, 0x123456789ABCDEF0ULL); ok1(!ntdb->file || (ntdb->file->allrecord_lock.count == 0 && ntdb->file->num_lockrecs == 0)); ntdb_close(ntdb); /* Deleting these entries in the db gave problems. */ ntdb = ntdb_open("run-13-delete.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &fixed_hattr); ok1(ntdb); if (!ntdb) continue; ok1(store_records(ntdb)); ok1(ntdb_check(ntdb, NULL, NULL) == 0); for (j = 0; j < sizeof(vals)/sizeof(vals[0]); j++) { NTDB_DATA key; key.dptr = (unsigned char *)&vals[j]; key.dsize = sizeof(vals[j]); ok1(ntdb_delete(ntdb, key) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); } ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-14-exists.c000066400000000000000000000024121224151530700157330ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include "logging.h" static bool test_records(struct ntdb_context *ntdb) { int i; NTDB_DATA key = { (unsigned char *)&i, sizeof(i) }; NTDB_DATA data = { (unsigned char *)&i, sizeof(i) }; for (i = 0; i < 1000; i++) { if (ntdb_exists(ntdb, key)) return false; if (ntdb_store(ntdb, key, data, NTDB_REPLACE) != 0) return false; if (!ntdb_exists(ntdb, key)) return false; } for (i = 0; i < 1000; i++) { if (!ntdb_exists(ntdb, key)) return false; if (ntdb_delete(ntdb, key) != 0) return false; if (ntdb_exists(ntdb, key)) return false; } return true; } int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; plan_tests(sizeof(flags) / sizeof(flags[0]) * 2 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-14-exists.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); if (ok1(ntdb)) 
ok1(test_records(ntdb)); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-16-wipe_all.c000066400000000000000000000021641224151530700162160ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include "logging.h" static bool add_records(struct ntdb_context *ntdb) { int i; NTDB_DATA key = { (unsigned char *)&i, sizeof(i) }; NTDB_DATA data = { (unsigned char *)&i, sizeof(i) }; for (i = 0; i < 1000; i++) { if (ntdb_store(ntdb, key, data, NTDB_REPLACE) != 0) return false; } return true; } int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; plan_tests(sizeof(flags) / sizeof(flags[0]) * 4 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-16-wipe_all.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); if (ok1(ntdb)) { NTDB_DATA key; ok1(add_records(ntdb)); ok1(ntdb_wipe_all(ntdb) == NTDB_SUCCESS); ok1(ntdb_firstkey(ntdb, &key) == NTDB_ERR_NOEXIST); ntdb_close(ntdb); } } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-20-alloc-attr.c000066400000000000000000000050271224151530700164600ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include #include #include "logging.h" static const struct ntdb_context *curr_ntdb; static const struct ntdb_file *curr_file; static int owner_null_count, owner_weird_count, alloc_count, free_count, expand_count; static void *test_alloc(const void *owner, size_t len, void *priv_data) { void *ret; if (!owner) { owner_null_count++; } else if (owner != curr_ntdb && owner != curr_file) { owner_weird_count++; } alloc_count++; ret = malloc(len); /* The first time, this is the current ntdb, next is * for the file struct. 
*/ if (!owner) { if (!curr_ntdb) { curr_ntdb = ret; } else if (!curr_file) { curr_file = ret; } } assert(priv_data == &owner_weird_count); return ret; } static void *test_expand(void *old, size_t newlen, void *priv_data) { expand_count++; assert(priv_data == &owner_weird_count); return realloc(old, newlen); } static void test_free(void *old, void *priv_data) { assert(priv_data == &owner_weird_count); if (old) { free_count++; } free(old); } int main(int argc, char *argv[]) { unsigned int i, j; union ntdb_attribute alloc_attr; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; NTDB_DATA key = { (unsigned char *)&j, sizeof(j) }; NTDB_DATA data = { (unsigned char *)&j, sizeof(j) }; alloc_attr.base.next = &tap_log_attr; alloc_attr.base.attr = NTDB_ATTRIBUTE_ALLOCATOR; alloc_attr.alloc.alloc = test_alloc; alloc_attr.alloc.expand = test_expand; alloc_attr.alloc.free = test_free; alloc_attr.alloc.priv_data = &owner_weird_count; plan_tests(sizeof(flags) / sizeof(flags[0]) * (1 + 700 * 3 + 4) + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { curr_ntdb = NULL; curr_file = NULL; ntdb = ntdb_open("run-12-store.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &alloc_attr); ok1(ntdb); if (!ntdb) continue; for (j = 0; j < 700; j++) { NTDB_DATA d = { NULL, 0 }; /* Bogus GCC warning */ ok1(ntdb_store(ntdb, key, data, NTDB_REPLACE) == 0); ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(ntdb_deq(d, data)); test_free(d.dptr, &owner_weird_count); } ntdb_close(ntdb); ok1(owner_null_count == 2+i*2); ok1(owner_weird_count == 0); ok1(alloc_count == free_count); ok1(expand_count != 0); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-21-parse_record.c000066400000000000000000000031211224151530700170600ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include "logging.h" 
static enum NTDB_ERROR parse(NTDB_DATA key, NTDB_DATA data, NTDB_DATA *expected) { if (!ntdb_deq(data, *expected)) return NTDB_ERR_EINVAL; return NTDB_SUCCESS; } static enum NTDB_ERROR parse_err(NTDB_DATA key, NTDB_DATA data, void *unused) { return 100; } static bool test_records(struct ntdb_context *ntdb) { int i; NTDB_DATA key = { (unsigned char *)&i, sizeof(i) }; NTDB_DATA data = { (unsigned char *)&i, sizeof(i) }; for (i = 0; i < 1000; i++) { if (ntdb_store(ntdb, key, data, NTDB_REPLACE) != 0) return false; } for (i = 0; i < 1000; i++) { if (ntdb_parse_record(ntdb, key, parse, &data) != NTDB_SUCCESS) return false; } if (ntdb_parse_record(ntdb, key, parse, &data) != NTDB_ERR_NOEXIST) return false; /* Test error return from parse function. */ i = 0; if (ntdb_parse_record(ntdb, key, parse_err, NULL) != 100) return false; return true; } int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; plan_tests(sizeof(flags) / sizeof(flags[0]) * 2 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("api-21-parse_record.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); if (ok1(ntdb)) ok1(test_records(ntdb)); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-55-transaction.c000066400000000000000000000040471224151530700167540ustar00rootroot00000000000000#include "private.h" // struct ntdb_context #include "ntdb.h" #include "tap-interface.h" #include #include #include #include #include "logging.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; unsigned char *buffer; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; NTDB_DATA key = ntdb_mkdata("key", 3); NTDB_DATA data; buffer = malloc(1000); for (i = 0; i < 1000; i++) buffer[i] = i; plan_tests(sizeof(flags) / 
sizeof(flags[0]) * 20 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-55-transaction.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; ok1(ntdb_transaction_start(ntdb) == 0); data.dptr = buffer; data.dsize = 1000; ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb_fetch(ntdb, key, &data) == NTDB_SUCCESS); ok1(data.dsize == 1000); ok1(memcmp(data.dptr, buffer, data.dsize) == 0); free(data.dptr); /* Cancelling a transaction means no store */ ntdb_transaction_cancel(ntdb); ok1(ntdb->file->allrecord_lock.count == 0 && ntdb->file->num_lockrecs == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ok1(ntdb_fetch(ntdb, key, &data) == NTDB_ERR_NOEXIST); /* Commit the transaction. */ ok1(ntdb_transaction_start(ntdb) == 0); data.dptr = buffer; data.dsize = 1000; ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb_fetch(ntdb, key, &data) == NTDB_SUCCESS); ok1(data.dsize == 1000); ok1(memcmp(data.dptr, buffer, data.dsize) == 0); free(data.dptr); ok1(ntdb_transaction_commit(ntdb) == 0); ok1(ntdb->file->allrecord_lock.count == 0 && ntdb->file->num_lockrecs == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ok1(ntdb_fetch(ntdb, key, &data) == NTDB_SUCCESS); ok1(data.dsize == 1000); ok1(memcmp(data.dptr, buffer, data.dsize) == 0); free(data.dptr); ntdb_close(ntdb); } ok1(tap_log_messages == 0); free(buffer); return exit_status(); } ntdb-1.0/test/api-60-noop-transaction.c000066400000000000000000000027311224151530700177170ustar00rootroot00000000000000#include "private.h" // struct ntdb_context #include "ntdb.h" #include "tap-interface.h" #include #include #include #include #include "logging.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; NTDB_DATA key = ntdb_mkdata("key", 3); NTDB_DATA data = ntdb_mkdata("data", 4), d; plan_tests(sizeof(flags) / sizeof(flags[0]) * 
12 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("api-60-transaction.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb_transaction_start(ntdb) == 0); /* Do an identical replace. */ ok1(ntdb_store(ntdb, key, data, NTDB_REPLACE) == 0); ok1(ntdb_transaction_commit(ntdb) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(ntdb_deq(data, d)); free(d.dptr); ntdb_close(ntdb); /* Reopen, fetch. */ ntdb = ntdb_open("api-60-transaction.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; ok1(ntdb_check(ntdb, NULL, NULL) == 0); ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(ntdb_deq(data, d)); free(d.dptr); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-80-tdb_fd.c000066400000000000000000000014421224151530700156430ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include "logging.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; plan_tests(sizeof(flags) / sizeof(flags[0]) * 3); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("api-80-ntdb_fd.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); if (!ok1(ntdb)) continue; if (flags[i] & NTDB_INTERNAL) ok1(ntdb_fd(ntdb) == -1); else ok1(ntdb_fd(ntdb) > 2); ntdb_close(ntdb); ok1(tap_log_messages == 0); } return exit_status(); } ntdb-1.0/test/api-81-seqnum.c000066400000000000000000000043041224151530700157320ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include #include "logging.h" int main(int argc, 
char *argv[]) { unsigned int i, seq; struct ntdb_context *ntdb; NTDB_DATA d = { NULL, 0 }; /* Bogus GCC warning */ NTDB_DATA key = ntdb_mkdata("key", 3); NTDB_DATA data = ntdb_mkdata("data", 4); int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; plan_tests(sizeof(flags) / sizeof(flags[0]) * 15 + 4 * 13); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("api-81-seqnum.ntdb", flags[i]|NTDB_SEQNUM|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); if (!ok1(ntdb)) continue; seq = 0; ok1(ntdb_get_seqnum(ntdb) == seq); ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb_get_seqnum(ntdb) == ++seq); /* Fetch doesn't change seqnum */ if (ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS)) free(d.dptr); ok1(ntdb_get_seqnum(ntdb) == seq); ok1(ntdb_append(ntdb, key, data) == NTDB_SUCCESS); ok1(ntdb_get_seqnum(ntdb) == ++seq); ok1(ntdb_delete(ntdb, key) == NTDB_SUCCESS); ok1(ntdb_get_seqnum(ntdb) == ++seq); /* Empty append works */ ok1(ntdb_append(ntdb, key, data) == NTDB_SUCCESS); ok1(ntdb_get_seqnum(ntdb) == ++seq); ok1(ntdb_wipe_all(ntdb) == NTDB_SUCCESS); ok1(ntdb_get_seqnum(ntdb) == ++seq); if (!(flags[i] & NTDB_INTERNAL)) { ok1(ntdb_transaction_start(ntdb) == NTDB_SUCCESS); ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb_get_seqnum(ntdb) == ++seq); ok1(ntdb_append(ntdb, key, data) == NTDB_SUCCESS); ok1(ntdb_get_seqnum(ntdb) == ++seq); ok1(ntdb_delete(ntdb, key) == NTDB_SUCCESS); ok1(ntdb_get_seqnum(ntdb) == ++seq); ok1(ntdb_transaction_commit(ntdb) == NTDB_SUCCESS); ok1(ntdb_get_seqnum(ntdb) == seq); ok1(ntdb_transaction_start(ntdb) == NTDB_SUCCESS); ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb_get_seqnum(ntdb) == seq + 1); ntdb_transaction_cancel(ntdb); ok1(ntdb_get_seqnum(ntdb) == seq); } ntdb_close(ntdb); ok1(tap_log_messages == 0); } return exit_status(); } 
ntdb-1.0/test/api-82-lockattr.c000066400000000000000000000150071224151530700162500ustar00rootroot00000000000000#include "private.h" // for ntdb_fcntl_unlock #include "ntdb.h" #include "tap-interface.h" #include #include #include #include #include "logging.h" static int mylock(int fd, int rw, off_t off, off_t len, bool waitflag, void *_err) { int *lock_err = _err; struct flock fl; int ret; if (*lock_err) { errno = *lock_err; return -1; } do { fl.l_type = rw; fl.l_whence = SEEK_SET; fl.l_start = off; fl.l_len = len; if (waitflag) ret = fcntl(fd, F_SETLKW, &fl); else ret = fcntl(fd, F_SETLK, &fl); } while (ret != 0 && errno == EINTR); return ret; } static int trav_err; static int trav(struct ntdb_context *ntdb, NTDB_DATA k, NTDB_DATA d, int *terr) { *terr = trav_err; return 0; } int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; union ntdb_attribute lock_attr; NTDB_DATA key = ntdb_mkdata("key", 3); NTDB_DATA data = ntdb_mkdata("data", 4); int lock_err; lock_attr.base.attr = NTDB_ATTRIBUTE_FLOCK; lock_attr.base.next = &tap_log_attr; lock_attr.flock.lock = mylock; lock_attr.flock.unlock = ntdb_fcntl_unlock; lock_attr.flock.data = &lock_err; plan_tests(sizeof(flags) / sizeof(flags[0]) * 81); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { NTDB_DATA d; /* Nonblocking open; expect no error message. */ lock_err = EAGAIN; ntdb = ntdb_open("run-82-lockattr.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &lock_attr); ok(errno == lock_err, "Errno is %u", errno); ok1(!ntdb); ok1(tap_log_messages == 0); lock_err = EINTR; ntdb = ntdb_open("run-82-lockattr.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &lock_attr); ok(errno == lock_err, "Errno is %u", errno); ok1(!ntdb); ok1(tap_log_messages == 0); /* Forced fail open. 
*/ lock_err = ENOMEM; ntdb = ntdb_open("run-82-lockattr.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &lock_attr); ok1(errno == lock_err); ok1(!ntdb); ok1(tap_log_messages == 1); tap_log_messages = 0; lock_err = 0; ntdb = ntdb_open("run-82-lockattr.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &lock_attr); if (!ok1(ntdb)) continue; ok1(tap_log_messages == 0); /* Nonblocking store. */ lock_err = EAGAIN; ok1(ntdb_store(ntdb, key, data, NTDB_REPLACE) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = EINTR; ok1(ntdb_store(ntdb, key, data, NTDB_REPLACE) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = ENOMEM; ok1(ntdb_store(ntdb, key, data, NTDB_REPLACE) == NTDB_ERR_LOCK); ok1(tap_log_messages == 1); tap_log_messages = 0; /* Nonblocking fetch. */ lock_err = EAGAIN; ok1(!ntdb_exists(ntdb, key)); ok1(tap_log_messages == 0); lock_err = EINTR; ok1(!ntdb_exists(ntdb, key)); ok1(tap_log_messages == 0); lock_err = ENOMEM; ok1(!ntdb_exists(ntdb, key)); ok1(tap_log_messages == 1); tap_log_messages = 0; lock_err = EAGAIN; ok1(ntdb_fetch(ntdb, key, &d) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = EINTR; ok1(ntdb_fetch(ntdb, key, &d) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = ENOMEM; ok1(ntdb_fetch(ntdb, key, &d) == NTDB_ERR_LOCK); ok1(tap_log_messages == 1); tap_log_messages = 0; /* Nonblocking delete. */ lock_err = EAGAIN; ok1(ntdb_delete(ntdb, key) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = EINTR; ok1(ntdb_delete(ntdb, key) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = ENOMEM; ok1(ntdb_delete(ntdb, key) == NTDB_ERR_LOCK); ok1(tap_log_messages == 1); tap_log_messages = 0; /* Nonblocking locks. 
*/ lock_err = EAGAIN; ok1(ntdb_chainlock(ntdb, key) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = EINTR; ok1(ntdb_chainlock(ntdb, key) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = ENOMEM; ok1(ntdb_chainlock(ntdb, key) == NTDB_ERR_LOCK); ok1(tap_log_messages == 1); tap_log_messages = 0; lock_err = EAGAIN; ok1(ntdb_chainlock_read(ntdb, key) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = EINTR; ok1(ntdb_chainlock_read(ntdb, key) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = ENOMEM; ok1(ntdb_chainlock_read(ntdb, key) == NTDB_ERR_LOCK); ok1(tap_log_messages == 1); tap_log_messages = 0; lock_err = EAGAIN; ok1(ntdb_lockall(ntdb) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = EINTR; ok1(ntdb_lockall(ntdb) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = ENOMEM; ok1(ntdb_lockall(ntdb) == NTDB_ERR_LOCK); /* This actually does divide and conquer. */ ok1(tap_log_messages > 0); tap_log_messages = 0; lock_err = EAGAIN; ok1(ntdb_lockall_read(ntdb) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = EINTR; ok1(ntdb_lockall_read(ntdb) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = ENOMEM; ok1(ntdb_lockall_read(ntdb) == NTDB_ERR_LOCK); ok1(tap_log_messages > 0); tap_log_messages = 0; /* Nonblocking traverse; go nonblock partway through. */ lock_err = 0; ok1(ntdb_store(ntdb, key, data, NTDB_REPLACE) == 0); /* Need two entries to ensure two lock attempts! */ ok1(ntdb_store(ntdb, ntdb_mkdata("key2", 4), data, NTDB_REPLACE) == 0); trav_err = EAGAIN; ok1(ntdb_traverse(ntdb, trav, &lock_err) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); trav_err = EINTR; lock_err = 0; ok1(ntdb_traverse(ntdb, trav, &lock_err) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); trav_err = ENOMEM; lock_err = 0; ok1(ntdb_traverse(ntdb, trav, &lock_err) == NTDB_ERR_LOCK); ok1(tap_log_messages == 1); tap_log_messages = 0; /* Nonblocking transactions. 
*/ lock_err = EAGAIN; ok1(ntdb_transaction_start(ntdb) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = EINTR; ok1(ntdb_transaction_start(ntdb) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = ENOMEM; ok1(ntdb_transaction_start(ntdb) == NTDB_ERR_LOCK); ok1(tap_log_messages == 1); tap_log_messages = 0; /* Nonblocking transaction prepare. */ lock_err = 0; ok1(ntdb_transaction_start(ntdb) == 0); ok1(ntdb_delete(ntdb, key) == 0); lock_err = EAGAIN; ok1(ntdb_transaction_prepare_commit(ntdb) == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); lock_err = 0; ok1(ntdb_transaction_prepare_commit(ntdb) == 0); ok1(ntdb_transaction_commit(ntdb) == 0); /* And the transaction was committed, right? */ ok1(!ntdb_exists(ntdb, key)); ntdb_close(ntdb); ok1(tap_log_messages == 0); } return exit_status(); } ntdb-1.0/test/api-83-openhook.c000066400000000000000000000056421224151530700162540ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include #include #include #include #include "external-agent.h" #include "logging.h" #define KEY_STR "key" static enum NTDB_ERROR clear_if_first(int fd, void *arg) { /* We hold a lock offset 4 always, so we can tell if anyone is holding it. * (This is compatible with tdb's TDB_CLEAR_IF_FIRST flag). */ struct flock fl; if (arg != clear_if_first) return NTDB_ERR_CORRUPT; fl.l_type = F_WRLCK; fl.l_whence = SEEK_SET; fl.l_start = 4; fl.l_len = 1; if (fcntl(fd, F_SETLK, &fl) == 0) { /* We must be first ones to open it! 
*/ diag("truncating file!"); if (ftruncate(fd, 0) != 0) { return NTDB_ERR_IO; } } fl.l_type = F_RDLCK; if (fcntl(fd, F_SETLKW, &fl) != 0) { return NTDB_ERR_IO; } return NTDB_SUCCESS; } int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb, *ntdb2; struct agent *agent; union ntdb_attribute cif; NTDB_DATA key = ntdb_mkdata(KEY_STR, strlen(KEY_STR)); int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; cif.openhook.base.attr = NTDB_ATTRIBUTE_OPENHOOK; cif.openhook.base.next = &tap_log_attr; cif.openhook.fn = clear_if_first; cif.openhook.data = clear_if_first; agent = prepare_external_agent(); plan_tests(sizeof(flags) / sizeof(flags[0]) * 16); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { /* Create it */ ntdb = ntdb_open("run-83-openhook.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, NULL); ok1(ntdb); ok1(ntdb_store(ntdb, key, key, NTDB_REPLACE) == 0); ntdb_close(ntdb); /* Now, open with CIF, should clear it. */ ntdb = ntdb_open("run-83-openhook.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR, 0, &cif); ok1(ntdb); ok1(!ntdb_exists(ntdb, key)); ok1(ntdb_store(ntdb, key, key, NTDB_REPLACE) == 0); /* Agent should not clear it, since it's still open. */ ok1(external_agent_operation(agent, OPEN_WITH_HOOK, "run-83-openhook.ntdb") == SUCCESS); ok1(external_agent_operation(agent, FETCH, KEY_STR "=" KEY_STR) == SUCCESS); ok1(external_agent_operation(agent, CLOSE, "") == SUCCESS); /* Still exists for us too. */ ok1(ntdb_exists(ntdb, key)); /* Nested open should not erase db. */ ntdb2 = ntdb_open("run-83-openhook.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR, 0, &cif); ok1(ntdb_exists(ntdb2, key)); ok1(ntdb_exists(ntdb, key)); ntdb_close(ntdb2); ok1(ntdb_exists(ntdb, key)); /* Close it, now agent should clear it. 
*/ ntdb_close(ntdb); ok1(external_agent_operation(agent, OPEN_WITH_HOOK, "run-83-openhook.ntdb") == SUCCESS); ok1(external_agent_operation(agent, FETCH, KEY_STR "=" KEY_STR) == FAILED); ok1(external_agent_operation(agent, CLOSE, "") == SUCCESS); ok1(tap_log_messages == 0); } free_external_agent(agent); return exit_status(); } ntdb-1.0/test/api-91-get-stats.c000066400000000000000000000032301224151530700163330ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include #include #include "logging.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; plan_tests(sizeof(flags) / sizeof(flags[0]) * 11); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { union ntdb_attribute *attr; NTDB_DATA key = ntdb_mkdata("key", 3), data; ntdb = ntdb_open("run-91-get-stats.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); /* Force an expansion */ data.dsize = 65536; data.dptr = calloc(data.dsize, 1); ok1(ntdb_store(ntdb, key, data, NTDB_REPLACE) == 0); free(data.dptr); /* Use malloc so valgrind will catch overruns. */ attr = malloc(sizeof *attr); attr->stats.base.attr = NTDB_ATTRIBUTE_STATS; attr->stats.size = sizeof(*attr); ok1(ntdb_get_attribute(ntdb, attr) == 0); ok1(attr->stats.size == sizeof(*attr)); ok1(attr->stats.allocs > 0); ok1(attr->stats.expands > 0); ok1(attr->stats.locks > 0); free(attr); /* Try short one. 
*/ attr = malloc(offsetof(struct ntdb_attribute_stats, allocs) + sizeof(attr->stats.allocs)); attr->stats.base.attr = NTDB_ATTRIBUTE_STATS; attr->stats.size = offsetof(struct ntdb_attribute_stats, allocs) + sizeof(attr->stats.allocs); ok1(ntdb_get_attribute(ntdb, attr) == 0); ok1(attr->stats.size == sizeof(*attr)); ok1(attr->stats.allocs > 0); free(attr); ok1(tap_log_messages == 0); ntdb_close(ntdb); } return exit_status(); } ntdb-1.0/test/api-92-get-set-readonly.c000066400000000000000000000064751224151530700176220ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include "logging.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; NTDB_DATA key = ntdb_mkdata("key", 3); NTDB_DATA data = ntdb_mkdata("data", 4); int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; plan_tests(sizeof(flags) / sizeof(flags[0]) * 48); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { /* RW -> R0 */ ntdb = ntdb_open("run-92-get-set-readonly.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); ok1(!(ntdb_get_flags(ntdb) & NTDB_RDONLY)); ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == NTDB_SUCCESS); ntdb_add_flag(ntdb, NTDB_RDONLY); ok1(ntdb_get_flags(ntdb) & NTDB_RDONLY); /* Can't store, append, delete. */ ok1(ntdb_store(ntdb, key, data, NTDB_MODIFY) == NTDB_ERR_RDONLY); ok1(tap_log_messages == 1); ok1(ntdb_append(ntdb, key, data) == NTDB_ERR_RDONLY); ok1(tap_log_messages == 2); ok1(ntdb_delete(ntdb, key) == NTDB_ERR_RDONLY); ok1(tap_log_messages == 3); /* Can't start a transaction, or any write lock. */ ok1(ntdb_transaction_start(ntdb) == NTDB_ERR_RDONLY); ok1(tap_log_messages == 4); ok1(ntdb_chainlock(ntdb, key) == NTDB_ERR_RDONLY); ok1(tap_log_messages == 5); ok1(ntdb_lockall(ntdb) == NTDB_ERR_RDONLY); ok1(tap_log_messages == 6); ok1(ntdb_wipe_all(ntdb) == NTDB_ERR_RDONLY); ok1(tap_log_messages == 7); /* Back to RW. 
*/ ntdb_remove_flag(ntdb, NTDB_RDONLY); ok1(!(ntdb_get_flags(ntdb) & NTDB_RDONLY)); ok1(ntdb_store(ntdb, key, data, NTDB_MODIFY) == NTDB_SUCCESS); ok1(ntdb_append(ntdb, key, data) == NTDB_SUCCESS); ok1(ntdb_delete(ntdb, key) == NTDB_SUCCESS); ok1(ntdb_transaction_start(ntdb) == NTDB_SUCCESS); ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == NTDB_SUCCESS); ok1(ntdb_transaction_commit(ntdb) == NTDB_SUCCESS); ok1(ntdb_chainlock(ntdb, key) == NTDB_SUCCESS); ntdb_chainunlock(ntdb, key); ok1(ntdb_lockall(ntdb) == NTDB_SUCCESS); ntdb_unlockall(ntdb); ok1(ntdb_wipe_all(ntdb) == NTDB_SUCCESS); ok1(tap_log_messages == 7); ntdb_close(ntdb); /* R0 -> RW */ ntdb = ntdb_open("run-92-get-set-readonly.ntdb", flags[i]|MAYBE_NOSYNC, O_RDONLY, 0600, &tap_log_attr); ok1(ntdb); ok1(ntdb_get_flags(ntdb) & NTDB_RDONLY); /* Can't store, append, delete. */ ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == NTDB_ERR_RDONLY); ok1(tap_log_messages == 8); ok1(ntdb_append(ntdb, key, data) == NTDB_ERR_RDONLY); ok1(tap_log_messages == 9); ok1(ntdb_delete(ntdb, key) == NTDB_ERR_RDONLY); ok1(tap_log_messages == 10); /* Can't start a transaction, or any write lock. 
*/ ok1(ntdb_transaction_start(ntdb) == NTDB_ERR_RDONLY); ok1(tap_log_messages == 11); ok1(ntdb_chainlock(ntdb, key) == NTDB_ERR_RDONLY); ok1(tap_log_messages == 12); ok1(ntdb_lockall(ntdb) == NTDB_ERR_RDONLY); ok1(tap_log_messages == 13); ok1(ntdb_wipe_all(ntdb) == NTDB_ERR_RDONLY); ok1(tap_log_messages == 14); /* Can't remove NTDB_RDONLY since we opened with O_RDONLY */ ntdb_remove_flag(ntdb, NTDB_RDONLY); ok1(tap_log_messages == 15); ok1(ntdb_get_flags(ntdb) & NTDB_RDONLY); ntdb_close(ntdb); ok1(tap_log_messages == 15); tap_log_messages = 0; } return exit_status(); } ntdb-1.0/test/api-93-repack.c000066400000000000000000000033141224151530700156720ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include "logging.h" #define NUM_TESTS 1000 static bool store_all(struct ntdb_context *ntdb) { unsigned int i; NTDB_DATA key = { (unsigned char *)&i, sizeof(i) }; NTDB_DATA dbuf = { (unsigned char *)&i, sizeof(i) }; for (i = 0; i < NUM_TESTS; i++) { if (ntdb_store(ntdb, key, dbuf, NTDB_INSERT) != NTDB_SUCCESS) return false; } return true; } static int mark_entry(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA data, bool found[]) { unsigned int num; if (key.dsize != sizeof(num)) return -1; memcpy(&num, key.dptr, key.dsize); if (num >= NUM_TESTS) return -1; if (found[num]) return -1; found[num] = true; return 0; } static bool is_all_set(bool found[], unsigned int num) { unsigned int i; for (i = 0; i < num; i++) if (!found[i]) return false; return true; } int main(int argc, char *argv[]) { unsigned int i; bool found[NUM_TESTS]; struct ntdb_context *ntdb; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; plan_tests(sizeof(flags) / sizeof(flags[0]) * 6 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-93-repack.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) break; ok1(store_all(ntdb)); 
ok1(ntdb_repack(ntdb) == NTDB_SUCCESS); memset(found, 0, sizeof(found)); ok1(ntdb_check(ntdb, NULL, NULL) == NTDB_SUCCESS); ok1(ntdb_traverse(ntdb, mark_entry, found) == NUM_TESTS); ok1(is_all_set(found, NUM_TESTS)); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-94-expand-during-parse.c000066400000000000000000000042051224151530700203030ustar00rootroot00000000000000/* We use direct access to hand to the parse function: what if db expands? */ #include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include "logging.h" #include "../private.h" /* To establish size, esp. for NTDB_INTERNAL dbs */ static struct ntdb_context *ntdb; static off_t ntdb_size(void) { return ntdb->file->map_size; } struct parse_info { unsigned int depth; NTDB_DATA expected; }; static enum NTDB_ERROR parse(NTDB_DATA key, NTDB_DATA data, struct parse_info *pinfo) { off_t flen; unsigned int i; if (!ntdb_deq(data, pinfo->expected)) return NTDB_ERR_EINVAL; flen = ntdb_size(); for (i = 0; ntdb_size() == flen; i++) { NTDB_DATA add = ntdb_mkdata(&i, sizeof(i)); /* This is technically illegal parse(), which is why we * grabbed allrecord lock.*/ ntdb_store(ntdb, add, add, NTDB_INSERT); } /* Access the record again. */ if (!ntdb_deq(data, pinfo->expected)) return NTDB_ERR_EINVAL; /* Recurse! Woot! */ if (pinfo->depth != 0) { enum NTDB_ERROR ecode; pinfo->depth--; ecode = ntdb_parse_record(ntdb, key, parse, pinfo); if (ecode) { return ecode; } } /* Access the record one more time. 
*/ if (!ntdb_deq(data, pinfo->expected)) return NTDB_ERR_EINVAL; return NTDB_SUCCESS; } int main(int argc, char *argv[]) { unsigned int i; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; struct parse_info pinfo; NTDB_DATA key = ntdb_mkdata("hello", 5), data = ntdb_mkdata("world", 5); plan_tests(sizeof(flags) / sizeof(flags[0]) * 3 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("api-94-expand-during-parse.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == NTDB_SUCCESS); ok1(ntdb_lockall(ntdb) == NTDB_SUCCESS); pinfo.expected = data; pinfo.depth = 3; ok1(ntdb_parse_record(ntdb, key, parse, &pinfo) == NTDB_SUCCESS); ntdb_unlockall(ntdb); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-95-read-only-during-parse.c000066400000000000000000000041441224151530700207210ustar00rootroot00000000000000/* Make sure write operations fail during ntdb_parse(). */ #include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include "logging.h" static struct ntdb_context *ntdb; /* We could get either of these. 
*/ static bool xfail(enum NTDB_ERROR ecode) { return ecode == NTDB_ERR_RDONLY || ecode == NTDB_ERR_LOCK; } static enum NTDB_ERROR parse(NTDB_DATA key, NTDB_DATA data, NTDB_DATA *expected) { NTDB_DATA add = ntdb_mkdata("another", strlen("another")); if (!ntdb_deq(data, *expected)) { return NTDB_ERR_EINVAL; } /* These should all fail.*/ if (!xfail(ntdb_store(ntdb, add, add, NTDB_INSERT))) { return NTDB_ERR_EINVAL; } tap_log_messages--; if (!xfail(ntdb_append(ntdb, key, add))) { return NTDB_ERR_EINVAL; } tap_log_messages--; if (!xfail(ntdb_delete(ntdb, key))) { return NTDB_ERR_EINVAL; } tap_log_messages--; if (!xfail(ntdb_transaction_start(ntdb))) { return NTDB_ERR_EINVAL; } tap_log_messages--; if (!xfail(ntdb_chainlock(ntdb, key))) { return NTDB_ERR_EINVAL; } tap_log_messages--; if (!xfail(ntdb_lockall(ntdb))) { return NTDB_ERR_EINVAL; } tap_log_messages--; if (!xfail(ntdb_wipe_all(ntdb))) { return NTDB_ERR_EINVAL; } tap_log_messages--; if (!xfail(ntdb_repack(ntdb))) { return NTDB_ERR_EINVAL; } tap_log_messages--; /* Access the record one more time. 
*/ if (!ntdb_deq(data, *expected)) { return NTDB_ERR_EINVAL; } return NTDB_SUCCESS; } int main(int argc, char *argv[]) { unsigned int i; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT }; NTDB_DATA key = ntdb_mkdata("hello", 5), data = ntdb_mkdata("world", 5); plan_tests(sizeof(flags) / sizeof(flags[0]) * 2 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("api-95-read-only-during-parse.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == NTDB_SUCCESS); ok1(ntdb_parse_record(ntdb, key, parse, &data) == NTDB_SUCCESS); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-add-remove-flags.c000066400000000000000000000041611224151530700173120ustar00rootroot00000000000000#include "private.h" // for ntdb_context #include "ntdb.h" #include "tap-interface.h" #include #include #include #include "logging.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; plan_tests(87); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-add-remove-flags.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; ok1(ntdb_get_flags(ntdb) == ntdb->flags); tap_log_messages = 0; ntdb_add_flag(ntdb, NTDB_NOLOCK); if (flags[i] & NTDB_INTERNAL) ok1(tap_log_messages == 1); else { ok1(tap_log_messages == 0); ok1(ntdb_get_flags(ntdb) & NTDB_NOLOCK); } tap_log_messages = 0; ntdb_add_flag(ntdb, NTDB_NOMMAP); if (flags[i] & NTDB_INTERNAL) ok1(tap_log_messages == 1); else { ok1(tap_log_messages == 0); ok1(ntdb_get_flags(ntdb) & NTDB_NOMMAP); ok1(ntdb->file->map_ptr == NULL); } tap_log_messages = 0; ntdb_add_flag(ntdb, NTDB_NOSYNC); if (flags[i] & NTDB_INTERNAL) ok1(tap_log_messages == 1); else { ok1(tap_log_messages == 0); 
ok1(ntdb_get_flags(ntdb) & NTDB_NOSYNC); } ok1(ntdb_get_flags(ntdb) == ntdb->flags); tap_log_messages = 0; ntdb_remove_flag(ntdb, NTDB_NOLOCK); if (flags[i] & NTDB_INTERNAL) ok1(tap_log_messages == 1); else { ok1(tap_log_messages == 0); ok1(!(ntdb_get_flags(ntdb) & NTDB_NOLOCK)); } tap_log_messages = 0; ntdb_remove_flag(ntdb, NTDB_NOMMAP); if (flags[i] & NTDB_INTERNAL) ok1(tap_log_messages == 1); else { ok1(tap_log_messages == 0); ok1(!(ntdb_get_flags(ntdb) & NTDB_NOMMAP)); ok1(ntdb->file->map_ptr != NULL); } tap_log_messages = 0; ntdb_remove_flag(ntdb, NTDB_NOSYNC); if (flags[i] & NTDB_INTERNAL) ok1(tap_log_messages == 1); else { ok1(tap_log_messages == 0); ok1(!(ntdb_get_flags(ntdb) & NTDB_NOSYNC)); } ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-check-callback.c000066400000000000000000000036341224151530700170100ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include "logging.h" #define NUM_RECORDS 1000 static bool store_records(struct ntdb_context *ntdb) { int i; NTDB_DATA key = { (unsigned char *)&i, sizeof(i) }; NTDB_DATA data = { (unsigned char *)&i, sizeof(i) }; for (i = 0; i < NUM_RECORDS; i++) if (ntdb_store(ntdb, key, data, NTDB_REPLACE) != 0) return false; return true; } static enum NTDB_ERROR check(NTDB_DATA key, NTDB_DATA data, bool *array) { int val; if (key.dsize != sizeof(val)) { diag("Wrong key size: %zu\n", key.dsize); return NTDB_ERR_CORRUPT; } if (key.dsize != data.dsize || memcmp(key.dptr, data.dptr, sizeof(val)) != 0) { diag("Key and data differ\n"); return NTDB_ERR_CORRUPT; } memcpy(&val, key.dptr, sizeof(val)); if (val >= NUM_RECORDS || val < 0) { diag("check value %i\n", val); return NTDB_ERR_CORRUPT; } if (array[val]) { diag("Value %i already seen\n", val); return NTDB_ERR_CORRUPT; } array[val] = true; return NTDB_SUCCESS; } int main(int argc, char *argv[]) { unsigned int i, j; struct ntdb_context *ntdb; int flags[] = { 
NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; return 0; plan_tests(sizeof(flags) / sizeof(flags[0]) * 4 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { bool array[NUM_RECORDS]; ntdb = ntdb_open("run-check-callback.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; ok1(store_records(ntdb)); for (j = 0; j < NUM_RECORDS; j++) array[j] = false; ok1(ntdb_check(ntdb, check, array) == NTDB_SUCCESS); for (j = 0; j < NUM_RECORDS; j++) if (!array[j]) break; ok1(j == NUM_RECORDS); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-firstkey-nextkey.c000066400000000000000000000077151224151530700175320ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include #include "logging.h" #define NUM_RECORDS 1000 static bool store_records(struct ntdb_context *ntdb) { int i; NTDB_DATA key = { (unsigned char *)&i, sizeof(i) }; NTDB_DATA data = { (unsigned char *)&i, sizeof(i) }; for (i = 0; i < NUM_RECORDS; i++) if (ntdb_store(ntdb, key, data, NTDB_REPLACE) != 0) return false; return true; } struct trav_data { unsigned int records[NUM_RECORDS]; unsigned int calls; }; static int trav(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA dbuf, void *p) { struct trav_data *td = p; int val; memcpy(&val, dbuf.dptr, dbuf.dsize); td->records[td->calls++] = val; return 0; } /* Since ntdb_nextkey frees dptr, we need to clone it. 
*/ static NTDB_DATA dup_key(NTDB_DATA key) { void *p = malloc(key.dsize); memcpy(p, key.dptr, key.dsize); key.dptr = p; return key; } int main(int argc, char *argv[]) { unsigned int i, j; int num; struct trav_data td; NTDB_DATA k; struct ntdb_context *ntdb; union ntdb_attribute seed_attr; enum NTDB_ERROR ecode; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; seed_attr.base.attr = NTDB_ATTRIBUTE_SEED; seed_attr.base.next = &tap_log_attr; seed_attr.seed.seed = 6334326220117065685ULL; plan_tests(sizeof(flags) / sizeof(flags[0]) * (NUM_RECORDS*6 + (NUM_RECORDS-1)*3 + 22) + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("api-firstkey-nextkey.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &seed_attr); ok1(ntdb); if (!ntdb) continue; ok1(ntdb_firstkey(ntdb, &k) == NTDB_ERR_NOEXIST); /* One entry... */ k.dptr = (unsigned char *)# k.dsize = sizeof(num); num = 0; ok1(ntdb_store(ntdb, k, k, NTDB_INSERT) == 0); ok1(ntdb_firstkey(ntdb, &k) == NTDB_SUCCESS); ok1(k.dsize == sizeof(num)); ok1(memcmp(k.dptr, &num, sizeof(num)) == 0); ok1(ntdb_nextkey(ntdb, &k) == NTDB_ERR_NOEXIST); /* Two entries. */ k.dptr = (unsigned char *)# k.dsize = sizeof(num); num = 1; ok1(ntdb_store(ntdb, k, k, NTDB_INSERT) == 0); ok1(ntdb_firstkey(ntdb, &k) == NTDB_SUCCESS); ok1(k.dsize == sizeof(num)); memcpy(&num, k.dptr, sizeof(num)); ok1(num == 0 || num == 1); ok1(ntdb_nextkey(ntdb, &k) == NTDB_SUCCESS); ok1(k.dsize == sizeof(j)); memcpy(&j, k.dptr, sizeof(j)); ok1(j == 0 || j == 1); ok1(j != num); ok1(ntdb_nextkey(ntdb, &k) == NTDB_ERR_NOEXIST); /* Clean up. */ k.dptr = (unsigned char *)# k.dsize = sizeof(num); num = 0; ok1(ntdb_delete(ntdb, k) == 0); num = 1; ok1(ntdb_delete(ntdb, k) == 0); /* Now lots of records. 
*/ ok1(store_records(ntdb)); td.calls = 0; num = ntdb_traverse(ntdb, trav, &td); ok1(num == NUM_RECORDS); ok1(td.calls == NUM_RECORDS); /* Simple loop should match ntdb_traverse */ for (j = 0, ecode = ntdb_firstkey(ntdb, &k); j < td.calls; j++) { int val; ok1(ecode == NTDB_SUCCESS); ok1(k.dsize == sizeof(val)); memcpy(&val, k.dptr, k.dsize); ok1(td.records[j] == val); ecode = ntdb_nextkey(ntdb, &k); } /* But arbitrary orderings should work too. */ for (j = td.calls-1; j > 0; j--) { k.dptr = (unsigned char *)&td.records[j-1]; k.dsize = sizeof(td.records[j-1]); k = dup_key(k); ok1(ntdb_nextkey(ntdb, &k) == NTDB_SUCCESS); ok1(k.dsize == sizeof(td.records[j])); ok1(memcmp(k.dptr, &td.records[j], k.dsize) == 0); free(k.dptr); } /* Even delete should work. */ for (j = 0, ecode = ntdb_firstkey(ntdb, &k); ecode != NTDB_ERR_NOEXIST; j++) { ok1(ecode == NTDB_SUCCESS); ok1(k.dsize == 4); ok1(ntdb_delete(ntdb, k) == 0); ecode = ntdb_nextkey(ntdb, &k); } diag("delete using first/nextkey gave %u of %u records", j, NUM_RECORDS); ok1(j == NUM_RECORDS); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-fork-test.c000066400000000000000000000114611224151530700161140ustar00rootroot00000000000000/* Test forking while holding lock. * * There are only five ways to do this currently: * (1) grab a ntdb_chainlock, then fork. * (2) grab a ntdb_lockall, then fork. * (3) grab a ntdb_lockall_read, then fork. * (4) start a transaction, then fork. * (5) fork from inside a ntdb_parse() callback. * * Note that we don't hold a lock across ntdb_traverse callbacks, so * that doesn't matter. */ #include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include #include #include #include #include "logging.h" static bool am_child = false; static enum NTDB_ERROR fork_in_parse(NTDB_DATA key, NTDB_DATA data, struct ntdb_context *ntdb) { int status; if (fork() == 0) { am_child = true; /* We expect this to fail. 
*/ if (ntdb_store(ntdb, key, data, NTDB_REPLACE) != NTDB_ERR_LOCK) exit(1); if (ntdb_fetch(ntdb, key, &data) != NTDB_ERR_LOCK) exit(1); if (tap_log_messages != 2) exit(2); return NTDB_SUCCESS; } wait(&status); ok1(WIFEXITED(status) && WEXITSTATUS(status) == 0); return NTDB_SUCCESS; } int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; NTDB_DATA key = ntdb_mkdata("key", 3); NTDB_DATA data = ntdb_mkdata("data", 4); plan_tests(sizeof(flags) / sizeof(flags[0]) * 14); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { int status; tap_log_messages = 0; ntdb = ntdb_open("run-fork-test.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); if (!ok1(ntdb)) continue; /* Put a record in here. */ ok1(ntdb_store(ntdb, key, data, NTDB_REPLACE) == NTDB_SUCCESS); ok1(ntdb_chainlock(ntdb, key) == NTDB_SUCCESS); if (fork() == 0) { /* We expect this to fail. */ if (ntdb_store(ntdb, key, data, NTDB_REPLACE) != NTDB_ERR_LOCK) return 1; if (ntdb_fetch(ntdb, key, &data) != NTDB_ERR_LOCK) return 1; if (tap_log_messages != 2) return 2; /* Child can do this without any complaints. */ ntdb_chainunlock(ntdb, key); if (tap_log_messages != 2) return 3; ntdb_close(ntdb); if (tap_log_messages != 2) return 4; return 0; } wait(&status); ok1(WIFEXITED(status) && WEXITSTATUS(status) == 0); ntdb_chainunlock(ntdb, key); ok1(ntdb_lockall(ntdb) == NTDB_SUCCESS); if (fork() == 0) { /* We expect this to fail. */ if (ntdb_store(ntdb, key, data, NTDB_REPLACE) != NTDB_ERR_LOCK) return 1; if (ntdb_fetch(ntdb, key, &data) != NTDB_ERR_LOCK) return 1; if (tap_log_messages != 2) return 2; /* Child can do this without any complaints. 
*/ ntdb_unlockall(ntdb); if (tap_log_messages != 2) return 3; ntdb_close(ntdb); if (tap_log_messages != 2) return 4; return 0; } wait(&status); ok1(WIFEXITED(status) && WEXITSTATUS(status) == 0); ntdb_unlockall(ntdb); ok1(ntdb_lockall_read(ntdb) == NTDB_SUCCESS); if (fork() == 0) { /* We expect this to fail. */ /* This would always fail anyway... */ if (ntdb_store(ntdb, key, data, NTDB_REPLACE) != NTDB_ERR_LOCK) return 1; if (ntdb_fetch(ntdb, key, &data) != NTDB_ERR_LOCK) return 1; if (tap_log_messages != 2) return 2; /* Child can do this without any complaints. */ ntdb_unlockall_read(ntdb); if (tap_log_messages != 2) return 3; ntdb_close(ntdb); if (tap_log_messages != 2) return 4; return 0; } wait(&status); ok1(WIFEXITED(status) && WEXITSTATUS(status) == 0); ntdb_unlockall_read(ntdb); ok1(ntdb_transaction_start(ntdb) == NTDB_SUCCESS); /* If transactions is empty, noop "commit" succeeds. */ ok1(ntdb_delete(ntdb, key) == NTDB_SUCCESS); if (fork() == 0) { int last_log_messages; /* We expect this to fail. */ if (ntdb_store(ntdb, key, data, NTDB_REPLACE) != NTDB_ERR_LOCK) return 1; if (ntdb_fetch(ntdb, key, &data) != NTDB_ERR_LOCK) return 1; if (tap_log_messages != 2) return 2; if (ntdb_transaction_prepare_commit(ntdb) != NTDB_ERR_LOCK) return 3; if (tap_log_messages == 2) return 4; last_log_messages = tap_log_messages; /* Child can do this without any complaints. */ ntdb_transaction_cancel(ntdb); if (tap_log_messages != last_log_messages) return 4; ntdb_close(ntdb); if (tap_log_messages != last_log_messages) return 4; return 0; } wait(&status); ok1(WIFEXITED(status) && WEXITSTATUS(status) == 0); ntdb_transaction_cancel(ntdb); ok1(ntdb_parse_record(ntdb, key, fork_in_parse, ntdb) == NTDB_SUCCESS); ntdb_close(ntdb); if (am_child) { /* Child can return from parse without complaints. 
*/ if (tap_log_messages != 2) exit(3); exit(0); } ok1(tap_log_messages == 0); } return exit_status(); } ntdb-1.0/test/api-locktimeout.c000066400000000000000000000115451224151530700165400ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include "system/wait.h" #include #include #include #include #include #include #include "logging.h" #include "external-agent.h" #undef alarm #define alarm fast_alarm /* Speed things up by doing things in milliseconds. */ static unsigned int fast_alarm(unsigned int milli_seconds) { struct itimerval it; it.it_interval.tv_sec = it.it_interval.tv_usec = 0; it.it_value.tv_sec = milli_seconds / 1000; it.it_value.tv_usec = milli_seconds * 1000; setitimer(ITIMER_REAL, &it, NULL); return 0; } #define CatchSignal(sig, handler) signal((sig), (handler)) static void do_nothing(int signum) { } /* This example code is taken from SAMBA, so try not to change it. */ static struct flock flock_struct; /* Return a value which is none of v1, v2 or v3. */ static inline short int invalid_value(short int v1, short int v2, short int v3) { short int try = (v1+v2+v3)^((v1+v2+v3) << 16); while (try == v1 || try == v2 || try == v3) try++; return try; } /* We invalidate in as many ways as we can, so the OS rejects it */ static void invalidate_flock_struct(int signum) { flock_struct.l_type = invalid_value(F_RDLCK, F_WRLCK, F_UNLCK); flock_struct.l_whence = invalid_value(SEEK_SET, SEEK_CUR, SEEK_END); flock_struct.l_start = -1; /* A large negative. 
*/ flock_struct.l_len = (((off_t)1 << (sizeof(off_t)*CHAR_BIT - 1)) + 1); } static int timeout_lock(int fd, int rw, off_t off, off_t len, bool waitflag, void *_timeout) { int ret, saved_errno = errno; unsigned int timeout = *(unsigned int *)_timeout; flock_struct.l_type = rw; flock_struct.l_whence = SEEK_SET; flock_struct.l_start = off; flock_struct.l_len = len; CatchSignal(SIGALRM, invalidate_flock_struct); alarm(timeout); for (;;) { if (waitflag) ret = fcntl(fd, F_SETLKW, &flock_struct); else ret = fcntl(fd, F_SETLK, &flock_struct); if (ret == 0) break; /* Not signalled? Something else went wrong. */ if (flock_struct.l_len == len) { if (errno == EAGAIN || errno == EINTR) continue; saved_errno = errno; break; } else { saved_errno = EINTR; break; } } alarm(0); errno = saved_errno; return ret; } static int ntdb_chainlock_with_timeout_internal(struct ntdb_context *ntdb, NTDB_DATA key, unsigned int timeout, int rw_type) { union ntdb_attribute locking; enum NTDB_ERROR ecode; if (timeout) { locking.base.attr = NTDB_ATTRIBUTE_FLOCK; ecode = ntdb_get_attribute(ntdb, &locking); if (ecode != NTDB_SUCCESS) return ecode; /* Replace locking function with our own. 
*/ locking.flock.data = &timeout; locking.flock.lock = timeout_lock; ecode = ntdb_set_attribute(ntdb, &locking); if (ecode != NTDB_SUCCESS) return ecode; } if (rw_type == F_RDLCK) ecode = ntdb_chainlock_read(ntdb, key); else ecode = ntdb_chainlock(ntdb, key); if (timeout) { ntdb_unset_attribute(ntdb, NTDB_ATTRIBUTE_FLOCK); } return ecode; } int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; NTDB_DATA key = ntdb_mkdata("hello", 5); int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; struct agent *agent; plan_tests(sizeof(flags) / sizeof(flags[0]) * 15); agent = prepare_external_agent(); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { enum NTDB_ERROR ecode; ntdb = ntdb_open("run-locktimeout.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); if (!ok1(ntdb)) break; /* Simple cases: should succeed. */ ecode = ntdb_chainlock_with_timeout_internal(ntdb, key, 20, F_RDLCK); ok1(ecode == NTDB_SUCCESS); ok1(tap_log_messages == 0); ntdb_chainunlock_read(ntdb, key); ok1(tap_log_messages == 0); ecode = ntdb_chainlock_with_timeout_internal(ntdb, key, 20, F_WRLCK); ok1(ecode == NTDB_SUCCESS); ok1(tap_log_messages == 0); ntdb_chainunlock(ntdb, key); ok1(tap_log_messages == 0); /* OK, get agent to start transaction, then we should time out. */ ok1(external_agent_operation(agent, OPEN, "run-locktimeout.ntdb") == SUCCESS); ok1(external_agent_operation(agent, TRANSACTION_START, "") == SUCCESS); ecode = ntdb_chainlock_with_timeout_internal(ntdb, key, 20, F_WRLCK); ok1(ecode == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); /* Even if we get a different signal, should be fine. 
*/ CatchSignal(SIGUSR1, do_nothing); external_agent_operation(agent, SEND_SIGNAL, ""); ecode = ntdb_chainlock_with_timeout_internal(ntdb, key, 20, F_WRLCK); ok1(ecode == NTDB_ERR_LOCK); ok1(tap_log_messages == 0); ok1(external_agent_operation(agent, TRANSACTION_COMMIT, "") == SUCCESS); ok1(external_agent_operation(agent, CLOSE, "") == SUCCESS); ntdb_close(ntdb); } free_external_agent(agent); return exit_status(); } ntdb-1.0/test/api-missing-entries.c000066400000000000000000000022511224151530700173130ustar00rootroot00000000000000/* Another test revealed that we lost an entry. This reproduces it. */ #include "config.h" #include "ntdb.h" #include #include "tap-interface.h" #include #include #include #include "logging.h" #define NUM_RECORDS 1189 /* We use the same seed which we saw this failure on. */ static uint32_t failhash(const void *key, size_t len, uint32_t seed, void *p) { return hash64_stable((const unsigned char *)key, len, 699537674708983027ULL); } int main(int argc, char *argv[]) { int i; struct ntdb_context *ntdb; NTDB_DATA key = { (unsigned char *)&i, sizeof(i) }; NTDB_DATA data = { (unsigned char *)&i, sizeof(i) }; union ntdb_attribute hattr = { .hash = { .base = { NTDB_ATTRIBUTE_HASH }, .fn = failhash } }; hattr.base.next = &tap_log_attr; plan_tests(1 + NUM_RECORDS + 2); ntdb = ntdb_open("run-missing-entries.ntdb", NTDB_INTERNAL, O_RDWR|O_CREAT|O_TRUNC, 0600, &hattr); if (ok1(ntdb)) { for (i = 0; i < NUM_RECORDS; i++) { ok1(ntdb_store(ntdb, key, data, NTDB_REPLACE) == 0); } ok1(ntdb_check(ntdb, NULL, NULL) == 0); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-open-multiple-times.c000066400000000000000000000051561224151530700201130ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include #include "logging.h" #include "../private.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb, *ntdb2; NTDB_DATA key = { 
(unsigned char *)&i, sizeof(i) }; NTDB_DATA data = { (unsigned char *)&i, sizeof(i) }; NTDB_DATA d = { NULL, 0 }; /* Bogus GCC warning */ int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; plan_tests(sizeof(flags) / sizeof(flags[0]) * 30); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-open-multiple-times.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; ntdb2 = ntdb_open("run-open-multiple-times.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT, 0600, &tap_log_attr); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ok1(ntdb_check(ntdb2, NULL, NULL) == 0); ok1((flags[i] & NTDB_NOMMAP) || ntdb2->file->map_ptr); /* Store in one, fetch in the other. */ ok1(ntdb_store(ntdb, key, data, NTDB_REPLACE) == 0); ok1(ntdb_fetch(ntdb2, key, &d) == NTDB_SUCCESS); ok1(ntdb_deq(d, data)); free(d.dptr); /* Vice versa, with delete. */ ok1(ntdb_delete(ntdb2, key) == 0); ok1(ntdb_fetch(ntdb, key, &d) == NTDB_ERR_NOEXIST); /* OK, now close first one, check second still good. */ ok1(ntdb_close(ntdb) == 0); ok1((flags[i] & NTDB_NOMMAP) || ntdb2->file->map_ptr); ok1(ntdb_store(ntdb2, key, data, NTDB_REPLACE) == 0); ok1(ntdb_fetch(ntdb2, key, &d) == NTDB_SUCCESS); ok1(ntdb_deq(d, data)); free(d.dptr); /* Reopen */ ntdb = ntdb_open("run-open-multiple-times.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT, 0600, &tap_log_attr); ok1(ntdb); ok1(ntdb_transaction_start(ntdb2) == 0); /* Anything in the other one should fail. */ ok1(ntdb_fetch(ntdb, key, &d) == NTDB_ERR_LOCK); ok1(tap_log_messages == 1); ok1(ntdb_store(ntdb, key, data, NTDB_REPLACE) == NTDB_ERR_LOCK); ok1(tap_log_messages == 2); ok1(ntdb_transaction_start(ntdb) == NTDB_ERR_LOCK); ok1(tap_log_messages == 3); ok1(ntdb_chainlock(ntdb, key) == NTDB_ERR_LOCK); ok1(tap_log_messages == 4); /* Transaciton should work as normal. */ ok1(ntdb_store(ntdb2, key, data, NTDB_REPLACE) == NTDB_SUCCESS); /* Now... try closing with locks held. 
*/ ok1(ntdb_close(ntdb2) == 0); ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(ntdb_deq(d, data)); free(d.dptr); ok1(ntdb_close(ntdb) == 0); ok1(tap_log_messages == 4); tap_log_messages = 0; } return exit_status(); } ntdb-1.0/test/api-record-expand.c000066400000000000000000000024501224151530700167270ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include #include "logging.h" #define MAX_SIZE 10000 #define SIZE_STEP 131 int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; NTDB_DATA key = ntdb_mkdata("key", 3); NTDB_DATA data; data.dptr = malloc(MAX_SIZE); memset(data.dptr, 0x24, MAX_SIZE); plan_tests(sizeof(flags) / sizeof(flags[0]) * (3 + (1 + (MAX_SIZE/SIZE_STEP)) * 2) + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-record-expand.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; data.dsize = 0; ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); for (data.dsize = 0; data.dsize < MAX_SIZE; data.dsize += SIZE_STEP) { memset(data.dptr, data.dsize, data.dsize); ok1(ntdb_store(ntdb, key, data, NTDB_MODIFY) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); } ntdb_close(ntdb); } ok1(tap_log_messages == 0); free(data.dptr); return exit_status(); } ntdb-1.0/test/api-simple-delete.c000066400000000000000000000022121224151530700167210ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include "logging.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; NTDB_DATA key = ntdb_mkdata("key", 3); 
NTDB_DATA data = ntdb_mkdata("data", 4); plan_tests(sizeof(flags) / sizeof(flags[0]) * 7 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-simple-delete.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (ntdb) { /* Delete should fail. */ ok1(ntdb_delete(ntdb, key) == NTDB_ERR_NOEXIST); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Insert should succeed. */ ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Delete should now work. */ ok1(ntdb_delete(ntdb, key) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ntdb_close(ntdb); } } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/api-summary.c000066400000000000000000000031651224151530700156750ustar00rootroot00000000000000#include "config.h" #include "ntdb.h" #include "tap-interface.h" #include #include #include #include #include "logging.h" int main(int argc, char *argv[]) { unsigned int i, j; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; NTDB_DATA key = { (unsigned char *)&j, sizeof(j) }; NTDB_DATA data = { (unsigned char *)&j, sizeof(j) }; char *summary; plan_tests(sizeof(flags) / sizeof(flags[0]) * (1 + 2 * 5) + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-summary.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; /* Put some stuff in there. */ for (j = 0; j < 500; j++) { /* Make sure padding varies to we get some graphs! 
*/ data.dsize = j % (sizeof(j) + 1); if (ntdb_store(ntdb, key, data, NTDB_REPLACE) != 0) fail("Storing in ntdb"); } for (j = 0; j <= NTDB_SUMMARY_HISTOGRAMS; j += NTDB_SUMMARY_HISTOGRAMS) { ok1(ntdb_summary(ntdb, j, &summary) == NTDB_SUCCESS); ok1(strstr(summary, "Number of records: 500\n")); ok1(strstr(summary, "Smallest/average/largest keys: 4/4/4\n")); ok1(strstr(summary, "Smallest/average/largest data: 0/2/4\n")); if (j == NTDB_SUMMARY_HISTOGRAMS) { ok1(strstr(summary, "|") && strstr(summary, "*")); } else { ok1(!strstr(summary, "|") && !strstr(summary, "*")); } free(summary); } ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/external-agent.c000066400000000000000000000135101224151530700163420ustar00rootroot00000000000000#include "external-agent.h" #include "logging.h" #include "lock-tracking.h" #include #include #include #include #include #include #include #include #include #include "tap-interface.h" #include #include static struct ntdb_context *ntdb; void (*external_agent_free)(void *) = free; static enum NTDB_ERROR clear_if_first(int fd, void *arg) { /* We hold a lock offset 4 always, so we can tell if anyone is holding it. * (This is compatible with tdb's TDB_CLEAR_IF_FIRST flag). */ struct flock fl; fl.l_type = F_WRLCK; fl.l_whence = SEEK_SET; fl.l_start = 4; fl.l_len = 1; if (fcntl(fd, F_SETLK, &fl) == 0) { /* We must be first ones to open it! 
*/ diag("agent truncating file!"); if (ftruncate(fd, 0) != 0) { return NTDB_ERR_IO; } } fl.l_type = F_RDLCK; if (fcntl(fd, F_SETLKW, &fl) != 0) { return NTDB_ERR_IO; } return NTDB_SUCCESS; } static enum agent_return do_operation(enum operation op, const char *name) { NTDB_DATA k, d; enum agent_return ret; NTDB_DATA data; enum NTDB_ERROR ecode; union ntdb_attribute cif; const char *eq; if (op != OPEN && op != OPEN_WITH_HOOK && !ntdb) { diag("external: No ntdb open!"); return OTHER_FAILURE; } diag("external: %s", operation_name(op)); eq = strchr(name, '='); if (eq) { k = ntdb_mkdata(name, eq - name); d = ntdb_mkdata(eq + 1, strlen(eq+1)); } else { k = ntdb_mkdata(name, strlen(name)); d.dsize = 0; d.dptr = NULL; } locking_would_block = 0; switch (op) { case OPEN: if (ntdb) { diag("Already have ntdb %s open", ntdb_name(ntdb)); return OTHER_FAILURE; } ntdb = ntdb_open(name, MAYBE_NOSYNC, O_RDWR, 0, &tap_log_attr); if (!ntdb) { if (!locking_would_block) diag("Opening ntdb gave %s", strerror(errno)); forget_locking(); ret = OTHER_FAILURE; } else ret = SUCCESS; break; case OPEN_WITH_HOOK: if (ntdb) { diag("Already have ntdb %s open", ntdb_name(ntdb)); return OTHER_FAILURE; } cif.openhook.base.attr = NTDB_ATTRIBUTE_OPENHOOK; cif.openhook.base.next = &tap_log_attr; cif.openhook.fn = clear_if_first; ntdb = ntdb_open(name, MAYBE_NOSYNC, O_RDWR, 0, &cif); if (!ntdb) { if (!locking_would_block) diag("Opening ntdb gave %s", strerror(errno)); forget_locking(); ret = OTHER_FAILURE; } else ret = SUCCESS; break; case FETCH: ecode = ntdb_fetch(ntdb, k, &data); if (ecode == NTDB_ERR_NOEXIST) { ret = FAILED; } else if (ecode < 0) { ret = OTHER_FAILURE; } else if (!ntdb_deq(data, d)) { ret = OTHER_FAILURE; external_agent_free(data.dptr); } else { ret = SUCCESS; external_agent_free(data.dptr); } break; case STORE: ret = ntdb_store(ntdb, k, d, 0) == 0 ? SUCCESS : OTHER_FAILURE; break; case TRANSACTION_START: ret = ntdb_transaction_start(ntdb) == 0 ? 
SUCCESS : OTHER_FAILURE; break; case TRANSACTION_COMMIT: ret = ntdb_transaction_commit(ntdb)==0 ? SUCCESS : OTHER_FAILURE; break; case NEEDS_RECOVERY: ret = external_agent_needs_rec(ntdb); break; case CHECK: ret = ntdb_check(ntdb, NULL, NULL) == 0 ? SUCCESS : OTHER_FAILURE; break; case CLOSE: ret = ntdb_close(ntdb) == 0 ? SUCCESS : OTHER_FAILURE; ntdb = NULL; break; case SEND_SIGNAL: /* We do this async */ ret = SUCCESS; break; default: ret = OTHER_FAILURE; } if (locking_would_block) ret = WOULD_HAVE_BLOCKED; return ret; } struct agent { int cmdfd, responsefd; }; /* Do this before doing any ntdb stuff. Return handle, or NULL. */ struct agent *prepare_external_agent(void) { int pid, ret; int command[2], response[2]; char name[1+PATH_MAX]; if (pipe(command) != 0 || pipe(response) != 0) return NULL; pid = fork(); if (pid < 0) return NULL; if (pid != 0) { struct agent *agent = malloc(sizeof(*agent)); close(command[0]); close(response[1]); agent->cmdfd = command[1]; agent->responsefd = response[0]; return agent; } close(command[1]); close(response[0]); /* We want to fail, not block. */ nonblocking_locks = true; log_prefix = "external: "; while ((ret = read(command[0], name, sizeof(name))) > 0) { enum agent_return result; result = do_operation(name[0], name+1); if (write(response[1], &result, sizeof(result)) != sizeof(result)) err(1, "Writing response"); if (name[0] == SEND_SIGNAL) { struct timeval ten_ms; ten_ms.tv_sec = 0; ten_ms.tv_usec = 10000; select(0, NULL, NULL, NULL, &ten_ms); kill(getppid(), SIGUSR1); } } exit(0); } /* Ask the external agent to try to do an operation. 
*/ enum agent_return external_agent_operation(struct agent *agent, enum operation op, const char *name) { enum agent_return res; unsigned int len; char *string; if (!name) name = ""; len = 1 + strlen(name) + 1; string = malloc(len); string[0] = op; strcpy(string+1, name); if (write(agent->cmdfd, string, len) != len || read(agent->responsefd, &res, sizeof(res)) != sizeof(res)) res = AGENT_DIED; free(string); return res; } const char *agent_return_name(enum agent_return ret) { return ret == SUCCESS ? "SUCCESS" : ret == WOULD_HAVE_BLOCKED ? "WOULD_HAVE_BLOCKED" : ret == AGENT_DIED ? "AGENT_DIED" : ret == FAILED ? "FAILED" : ret == OTHER_FAILURE ? "OTHER_FAILURE" : "**INVALID**"; } const char *operation_name(enum operation op) { switch (op) { case OPEN: return "OPEN"; case OPEN_WITH_HOOK: return "OPEN_WITH_HOOK"; case FETCH: return "FETCH"; case STORE: return "STORE"; case CHECK: return "CHECK"; case TRANSACTION_START: return "TRANSACTION_START"; case TRANSACTION_COMMIT: return "TRANSACTION_COMMIT"; case NEEDS_RECOVERY: return "NEEDS_RECOVERY"; case SEND_SIGNAL: return "SEND_SIGNAL"; case CLOSE: return "CLOSE"; } return "**INVALID**"; } void free_external_agent(struct agent *agent) { close(agent->cmdfd); close(agent->responsefd); free(agent); } ntdb-1.0/test/external-agent.h000066400000000000000000000023361224151530700163530ustar00rootroot00000000000000#ifndef NTDB_TEST_EXTERNAL_AGENT_H #define NTDB_TEST_EXTERNAL_AGENT_H /* For locking tests, we need a different process to try things at * various times. */ enum operation { OPEN, OPEN_WITH_HOOK, FETCH, STORE, TRANSACTION_START, TRANSACTION_COMMIT, NEEDS_RECOVERY, CHECK, SEND_SIGNAL, CLOSE, }; /* Do this before doing any ntdb stuff. Return handle, or -1. */ struct agent *prepare_external_agent(void); enum agent_return { SUCCESS, WOULD_HAVE_BLOCKED, AGENT_DIED, FAILED, /* For fetch, or NEEDS_RECOVERY */ OTHER_FAILURE, }; /* Ask the external agent to try to do an operation. 
* name == ntdb name for OPEN/OPEN_WITH_CLEAR_IF_FIRST, * = for FETCH/STORE. */ enum agent_return external_agent_operation(struct agent *handle, enum operation op, const char *name); /* Hook into free() on ntdb_data in external agent. */ extern void (*external_agent_free)(void *); /* Mapping enum -> string. */ const char *agent_return_name(enum agent_return ret); const char *operation_name(enum operation op); void free_external_agent(struct agent *agent); /* Internal use: */ struct ntdb_context; enum agent_return external_agent_needs_rec(struct ntdb_context *ntdb); #endif /* NTDB_TEST_EXTERNAL_AGENT_H */ ntdb-1.0/test/failtest_helper.c000066400000000000000000000044661224151530700166100ustar00rootroot00000000000000#include "failtest_helper.h" #include "logging.h" #include #include "tap-interface.h" bool failtest_suppress = false; /* FIXME: From ccan/str */ static inline bool strends(const char *str, const char *postfix) { if (strlen(str) < strlen(postfix)) return false; return !strcmp(str + strlen(str) - strlen(postfix), postfix); } bool failmatch(const struct failtest_call *call, const char *file, int line, enum failtest_call_type type) { return call->type == type && call->line == line && ((strcmp(call->file, file) == 0) || (strends(call->file, file) && (call->file[strlen(call->file) - strlen(file) - 1] == '/'))); } static bool is_nonblocking_lock(const struct failtest_call *call) { return call->type == FAILTEST_FCNTL && call->u.fcntl.cmd == F_SETLK; } static bool is_unlock(const struct failtest_call *call) { return call->type == FAILTEST_FCNTL && call->u.fcntl.arg.fl.l_type == F_UNLCK; } bool exit_check_log(struct tlist_calls *history) { const struct failtest_call *i; unsigned int malloc_count = 0; tlist_for_each(history, i, list) { if (!i->fail) continue; /* Failing the /dev/urandom open doesn't count: we fall back. */ if (failmatch(i, URANDOM_OPEN)) continue; /* Similarly with read fail. 
*/ if (failmatch(i, URANDOM_READ)) continue; /* Initial allocation of ntdb doesn't log. */ if (i->type == FAILTEST_MALLOC) { if (malloc_count++ == 0) { continue; } } /* We don't block "failures" on non-blocking locks. */ if (is_nonblocking_lock(i)) continue; if (!tap_log_messages) diag("We didn't log for %s:%u", i->file, i->line); return tap_log_messages != 0; } return true; } /* Some places we soldier on despite errors: only fail them once. */ enum failtest_result block_repeat_failures(struct tlist_calls *history) { const struct failtest_call *last; last = tlist_tail(history, list); if (failtest_suppress) return FAIL_DONT_FAIL; if (failmatch(last, URANDOM_OPEN) || failmatch(last, URANDOM_READ)) { return FAIL_PROBE; } /* We handle mmap failing, by falling back to read/write, so * don't try all possible paths. */ if (last->type == FAILTEST_MMAP) return FAIL_PROBE; /* Unlock or non-blocking lock is fail-once. */ if (is_unlock(last) || is_nonblocking_lock(last)) return FAIL_PROBE; return FAIL_OK; } ntdb-1.0/test/failtest_helper.h000066400000000000000000000011261224151530700166030ustar00rootroot00000000000000#ifndef NTDB_TEST_FAILTEST_HELPER_H #define NTDB_TEST_FAILTEST_HELPER_H #include #include /* FIXME: Check these! */ #define URANDOM_OPEN "open.c", 62, FAILTEST_OPEN #define URANDOM_READ "open.c", 42, FAILTEST_READ bool exit_check_log(struct tlist_calls *history); bool failmatch(const struct failtest_call *call, const char *file, int line, enum failtest_call_type type); enum failtest_result block_repeat_failures(struct tlist_calls *history); /* Set this to suppress failure. */ extern bool failtest_suppress; #endif /* NTDB_TEST_LOGGING_H */ ntdb-1.0/test/helpapi-external-agent.c000066400000000000000000000003001224151530700177530ustar00rootroot00000000000000#include "external-agent.h" /* This isn't possible with via the ntdb API, but this makes it link. 
*/ enum agent_return external_agent_needs_rec(struct ntdb_context *ntdb) { return FAILED; } ntdb-1.0/test/helprun-external-agent.c000066400000000000000000000002621224151530700200150ustar00rootroot00000000000000#include "external-agent.h" #include "private.h" enum agent_return external_agent_needs_rec(struct ntdb_context *ntdb) { return ntdb_needs_recovery(ntdb) ? SUCCESS : FAILED; } ntdb-1.0/test/helprun-layout.c000066400000000000000000000221141224151530700164140ustar00rootroot00000000000000/* NTDB tools to create various canned database layouts. */ #include "layout.h" #include #include #include #include #include "logging.h" struct ntdb_layout *new_ntdb_layout(void) { struct ntdb_layout *layout = malloc(sizeof(*layout)); layout->num_elems = 0; layout->elem = NULL; return layout; } static void add(struct ntdb_layout *layout, union ntdb_layout_elem elem) { layout->elem = realloc(layout->elem, sizeof(layout->elem[0]) * (layout->num_elems+1)); layout->elem[layout->num_elems++] = elem; } void ntdb_layout_add_freetable(struct ntdb_layout *layout) { union ntdb_layout_elem elem; elem.base.type = FREETABLE; add(layout, elem); } void ntdb_layout_add_free(struct ntdb_layout *layout, ntdb_len_t len, unsigned ftable) { union ntdb_layout_elem elem; elem.base.type = FREE; elem.free.len = len; elem.free.ftable_num = ftable; add(layout, elem); } void ntdb_layout_add_capability(struct ntdb_layout *layout, uint64_t type, bool write_breaks, bool check_breaks, bool open_breaks, ntdb_len_t extra) { union ntdb_layout_elem elem; elem.base.type = CAPABILITY; elem.capability.type = type; if (write_breaks) elem.capability.type |= NTDB_CAP_NOWRITE; if (open_breaks) elem.capability.type |= NTDB_CAP_NOOPEN; if (check_breaks) elem.capability.type |= NTDB_CAP_NOCHECK; elem.capability.extra = extra; add(layout, elem); } static NTDB_DATA dup_key(NTDB_DATA key) { NTDB_DATA ret; ret.dsize = key.dsize; ret.dptr = malloc(ret.dsize); memcpy(ret.dptr, key.dptr, ret.dsize); return ret; } void 
ntdb_layout_add_used(struct ntdb_layout *layout, NTDB_DATA key, NTDB_DATA data, ntdb_len_t extra) { union ntdb_layout_elem elem; elem.base.type = DATA; elem.used.key = dup_key(key); elem.used.data = dup_key(data); elem.used.extra = extra; add(layout, elem); } static ntdb_len_t free_record_len(ntdb_len_t len) { return sizeof(struct ntdb_used_record) + len; } static ntdb_len_t data_record_len(struct tle_used *used) { ntdb_len_t len; len = sizeof(struct ntdb_used_record) + used->key.dsize + used->data.dsize + used->extra; assert(len >= sizeof(struct ntdb_free_record)); return len; } static ntdb_len_t capability_len(struct tle_capability *cap) { return sizeof(struct ntdb_capability) + cap->extra; } static ntdb_len_t freetable_len(struct tle_freetable *ftable) { return sizeof(struct ntdb_freetable); } static void set_free_record(void *mem, ntdb_len_t len) { /* We do all the work in add_to_freetable */ } static void add_zero_pad(struct ntdb_used_record *u, size_t len, size_t extra) { if (extra) ((char *)(u + 1))[len] = '\0'; } static void set_data_record(void *mem, struct ntdb_context *ntdb, struct tle_used *used) { struct ntdb_used_record *u = mem; set_header(ntdb, u, NTDB_USED_MAGIC, used->key.dsize, used->data.dsize, used->key.dsize + used->data.dsize + used->extra); memcpy(u + 1, used->key.dptr, used->key.dsize); memcpy((char *)(u + 1) + used->key.dsize, used->data.dptr, used->data.dsize); add_zero_pad(u, used->key.dsize + used->data.dsize, used->extra); } static void set_capability(void *mem, struct ntdb_context *ntdb, struct tle_capability *cap, struct ntdb_header *hdr, ntdb_off_t last_cap) { struct ntdb_capability *c = mem; ntdb_len_t len = sizeof(*c) - sizeof(struct ntdb_used_record) + cap->extra; c->type = cap->type; c->next = 0; set_header(ntdb, &c->hdr, NTDB_CAP_MAGIC, 0, len, len); /* Append to capability list. 
*/ if (!last_cap) { hdr->capabilities = cap->base.off; } else { c = (struct ntdb_capability *)((char *)hdr + last_cap); c->next = cap->base.off; } } static void set_freetable(void *mem, struct ntdb_context *ntdb, struct tle_freetable *freetable, struct ntdb_header *hdr, ntdb_off_t last_ftable) { struct ntdb_freetable *ftable = mem; memset(ftable, 0, sizeof(*ftable)); set_header(ntdb, &ftable->hdr, NTDB_FTABLE_MAGIC, 0, sizeof(*ftable) - sizeof(ftable->hdr), sizeof(*ftable) - sizeof(ftable->hdr)); if (last_ftable) { ftable = (struct ntdb_freetable *)((char *)hdr + last_ftable); ftable->next = freetable->base.off; } else { hdr->free_table = freetable->base.off; } } static void add_to_freetable(struct ntdb_context *ntdb, ntdb_off_t eoff, ntdb_off_t elen, unsigned ftable, struct tle_freetable *freetable) { ntdb->ftable_off = freetable->base.off; ntdb->ftable = ftable; add_free_record(ntdb, eoff, sizeof(struct ntdb_used_record) + elen, NTDB_LOCK_WAIT, false); } /* Get bits from a value. */ static uint32_t bits(uint64_t val, unsigned start, unsigned num) { assert(num <= 32); return (val >> start) & ((1U << num) - 1); } static ntdb_off_t encode_offset(const struct ntdb_context *ntdb, ntdb_off_t new_off, uint32_t hash) { ntdb_off_t extra; assert((new_off & (1ULL << NTDB_OFF_CHAIN_BIT)) == 0); assert((new_off >> (64 - NTDB_OFF_UPPER_STEAL)) == 0); /* We pack extra hash bits into the upper bits of the offset. */ extra = bits(hash, ntdb->hash_bits, NTDB_OFF_UPPER_STEAL); extra <<= (64 - NTDB_OFF_UPPER_STEAL); return new_off | extra; } static ntdb_off_t hbucket_off(ntdb_len_t idx) { return sizeof(struct ntdb_header) + sizeof(struct ntdb_used_record) + idx * sizeof(ntdb_off_t); } /* FIXME: Our hash table handling here is primitive: we don't expand! 
*/ static void add_to_hashtable(struct ntdb_context *ntdb, ntdb_off_t eoff, NTDB_DATA key) { ntdb_off_t b_off; uint32_t h = ntdb_hash(ntdb, key.dptr, key.dsize); b_off = hbucket_off(h & ((1 << ntdb->hash_bits)-1)); if (ntdb_read_off(ntdb, b_off) != 0) abort(); ntdb_write_off(ntdb, b_off, encode_offset(ntdb, eoff, h)); } static struct tle_freetable *find_ftable(struct ntdb_layout *layout, unsigned num) { unsigned i; for (i = 0; i < layout->num_elems; i++) { if (layout->elem[i].base.type != FREETABLE) continue; if (num == 0) return &layout->elem[i].ftable; num--; } abort(); } /* FIXME: Support NTDB_CONVERT */ struct ntdb_context *ntdb_layout_get(struct ntdb_layout *layout, void (*freefn)(void *), union ntdb_attribute *attr) { unsigned int i; ntdb_off_t off, hdrlen, len, last_ftable, last_cap; char *mem; struct ntdb_context *ntdb; /* Now populate our header, cribbing from a real NTDB header. */ ntdb = ntdb_open("layout", NTDB_INTERNAL, O_RDWR, 0, attr); off = sizeof(struct ntdb_header) + sizeof(struct ntdb_used_record) + (sizeof(ntdb_off_t) << ntdb->hash_bits); hdrlen = off; /* First pass of layout: calc lengths */ for (i = 0; i < layout->num_elems; i++) { union ntdb_layout_elem *e = &layout->elem[i]; e->base.off = off; switch (e->base.type) { case FREETABLE: len = freetable_len(&e->ftable); break; case FREE: len = free_record_len(e->free.len); break; case DATA: len = data_record_len(&e->used); break; case CAPABILITY: len = capability_len(&e->capability); break; default: abort(); } off += len; } mem = malloc(off); /* Fill with some weird pattern. */ memset(mem, 0x99, off); memcpy(mem, ntdb->file->map_ptr, hdrlen); /* Mug the ntdb we have to make it use this. 
*/ freefn(ntdb->file->map_ptr); ntdb->file->map_ptr = mem; ntdb->file->map_size = off; last_ftable = 0; last_cap = 0; for (i = 0; i < layout->num_elems; i++) { union ntdb_layout_elem *e = &layout->elem[i]; switch (e->base.type) { case FREETABLE: set_freetable(mem + e->base.off, ntdb, &e->ftable, (struct ntdb_header *)mem, last_ftable); last_ftable = e->base.off; break; case FREE: set_free_record(mem + e->base.off, e->free.len); break; case DATA: set_data_record(mem + e->base.off, ntdb, &e->used); break; case CAPABILITY: set_capability(mem + e->base.off, ntdb, &e->capability, (struct ntdb_header *)mem, last_cap); last_cap = e->base.off; break; } } /* Must have a free table! */ assert(last_ftable); /* Now fill the free and hash tables. */ for (i = 0; i < layout->num_elems; i++) { union ntdb_layout_elem *e = &layout->elem[i]; switch (e->base.type) { case FREE: add_to_freetable(ntdb, e->base.off, e->free.len, e->free.ftable_num, find_ftable(layout, e->free.ftable_num)); break; case DATA: add_to_hashtable(ntdb, e->base.off, e->used.key); break; default: break; } } ntdb->ftable_off = find_ftable(layout, 0)->base.off; return ntdb; } void ntdb_layout_write(struct ntdb_layout *layout, void (*freefn)(void *), union ntdb_attribute *attr, const char *filename) { struct ntdb_context *ntdb = ntdb_layout_get(layout, freefn, attr); int fd; fd = open(filename, O_WRONLY|O_TRUNC|O_CREAT, 0600); if (fd < 0) err(1, "opening %s for writing", filename); if (write(fd, ntdb->file->map_ptr, ntdb->file->map_size) != ntdb->file->map_size) err(1, "writing %s", filename); close(fd); ntdb_close(ntdb); } void ntdb_layout_free(struct ntdb_layout *layout) { unsigned int i; for (i = 0; i < layout->num_elems; i++) { if (layout->elem[i].base.type == DATA) { free(layout->elem[i].used.key.dptr); free(layout->elem[i].used.data.dptr); } } free(layout->elem); free(layout); } ntdb-1.0/test/layout.h000066400000000000000000000036071224151530700147540ustar00rootroot00000000000000#ifndef NTDB_TEST_LAYOUT_H 
#define NTDB_TEST_LAYOUT_H #include "private.h" struct ntdb_layout *new_ntdb_layout(void); void ntdb_layout_add_freetable(struct ntdb_layout *layout); void ntdb_layout_add_free(struct ntdb_layout *layout, ntdb_len_t len, unsigned ftable); void ntdb_layout_add_used(struct ntdb_layout *layout, NTDB_DATA key, NTDB_DATA data, ntdb_len_t extra); void ntdb_layout_add_capability(struct ntdb_layout *layout, uint64_t type, bool write_breaks, bool check_breaks, bool open_breaks, ntdb_len_t extra); #if 0 /* FIXME: Allow allocation of subtables */ void ntdb_layout_add_hashtable(struct ntdb_layout *layout, int htable_parent, /* -1 == toplevel */ unsigned int bucket, ntdb_len_t extra); #endif /* freefn is needed if we're using failtest_free. */ struct ntdb_context *ntdb_layout_get(struct ntdb_layout *layout, void (*freefn)(void *), union ntdb_attribute *attr); void ntdb_layout_write(struct ntdb_layout *layout, void (*freefn)(void *), union ntdb_attribute *attr, const char *filename); void ntdb_layout_free(struct ntdb_layout *layout); enum layout_type { FREETABLE, FREE, DATA, CAPABILITY }; /* Shared by all union members. */ struct tle_base { enum layout_type type; ntdb_off_t off; }; struct tle_freetable { struct tle_base base; }; struct tle_free { struct tle_base base; ntdb_len_t len; unsigned ftable_num; }; struct tle_used { struct tle_base base; NTDB_DATA key; NTDB_DATA data; ntdb_len_t extra; }; struct tle_capability { struct tle_base base; uint64_t type; ntdb_len_t extra; }; union ntdb_layout_elem { struct tle_base base; struct tle_freetable ftable; struct tle_free free; struct tle_used used; struct tle_capability capability; }; struct ntdb_layout { unsigned int num_elems; union ntdb_layout_elem *elem; }; #endif /* NTDB_TEST_LAYOUT_H */ ntdb-1.0/test/lock-tracking.c000066400000000000000000000063471224151530700161660ustar00rootroot00000000000000/* We save the locks so we can reaquire them. */ #include "private.h" /* For NTDB_HASH_LOCK_START, etc. 
*/ #include #include #include #include #include "tap-interface.h" #include "lock-tracking.h" struct lock { struct lock *next; unsigned int off; unsigned int len; int type; }; static struct lock *locks; int locking_errors = 0; bool suppress_lockcheck = false; bool nonblocking_locks; int locking_would_block = 0; void (*unlock_callback)(int fd); int fcntl_with_lockcheck(int fd, int cmd, ... /* arg */ ) { va_list ap; int ret, arg3; struct flock *fl; bool may_block = false; if (cmd != F_SETLK && cmd != F_SETLKW) { /* This may be totally bogus, but we don't know in general. */ va_start(ap, cmd); arg3 = va_arg(ap, int); va_end(ap); return fcntl(fd, cmd, arg3); } va_start(ap, cmd); fl = va_arg(ap, struct flock *); va_end(ap); if (cmd == F_SETLKW && nonblocking_locks) { cmd = F_SETLK; may_block = true; } ret = fcntl(fd, cmd, fl); /* Detect when we failed, but might have been OK if we waited. */ if (may_block && ret == -1 && (errno == EAGAIN || errno == EACCES)) { locking_would_block++; } if (fl->l_type == F_UNLCK) { struct lock **l; struct lock *old = NULL; for (l = &locks; *l; l = &(*l)->next) { if ((*l)->off == fl->l_start && (*l)->len == fl->l_len) { if (ret == 0) { old = *l; *l = (*l)->next; free(old); } break; } } if (!old && !suppress_lockcheck) { diag("Unknown unlock %u@%u - %i", (int)fl->l_len, (int)fl->l_start, ret); locking_errors++; } } else { struct lock *new, *i; unsigned int fl_end = fl->l_start + fl->l_len; if (fl->l_len == 0) fl_end = (unsigned int)-1; /* Check for overlaps: we shouldn't do this. */ for (i = locks; i; i = i->next) { unsigned int i_end = i->off + i->len; if (i->len == 0) i_end = (unsigned int)-1; if (fl->l_start >= i->off && fl->l_start < i_end) break; if (fl_end > i->off && fl_end < i_end) break; /* ntdb_allrecord_lock does this, handle adjacent: */ if (fl->l_start > NTDB_HASH_LOCK_START && fl->l_start == i_end && fl->l_type == i->type) { if (ret == 0) { i->len = fl->l_len ? 
i->len + fl->l_len : 0; } goto done; } } if (i) { /* Special case: upgrade of allrecord lock. */ if (i->type == F_RDLCK && fl->l_type == F_WRLCK && i->off == NTDB_HASH_LOCK_START && fl->l_start == NTDB_HASH_LOCK_START && i->len == 0 && fl->l_len == 0) { if (ret == 0) i->type = F_WRLCK; goto done; } if (!suppress_lockcheck) { diag("%s lock %u@%u overlaps %u@%u", fl->l_type == F_WRLCK ? "write" : "read", (int)fl->l_len, (int)fl->l_start, i->len, (int)i->off); locking_errors++; } } if (ret == 0) { new = malloc(sizeof *new); new->off = fl->l_start; new->len = fl->l_len; new->type = fl->l_type; new->next = locks; locks = new; } } done: if (ret == 0 && fl->l_type == F_UNLCK && unlock_callback) unlock_callback(fd); return ret; } unsigned int forget_locking(void) { unsigned int num = 0; while (locks) { struct lock *next = locks->next; free(locks); locks = next; num++; } return num; } ntdb-1.0/test/lock-tracking.h000066400000000000000000000012511224151530700161600ustar00rootroot00000000000000#ifndef LOCK_TRACKING_H #define LOCK_TRACKING_H #include /* Set this if you want a callback after fnctl unlock. */ extern void (*unlock_callback)(int fd); /* Replacement fcntl. */ int fcntl_with_lockcheck(int fd, int cmd, ... /* arg */ ); /* Discard locking info: returns number of locks outstanding. */ unsigned int forget_locking(void); /* Number of errors in locking. */ extern int locking_errors; /* Suppress lock checking. */ extern bool suppress_lockcheck; /* Make all locks non-blocking. */ extern bool nonblocking_locks; /* Number of times we failed a lock because we made it non-blocking. 
*/ extern int locking_would_block; #endif /* LOCK_TRACKING_H */ ntdb-1.0/test/logging.c000066400000000000000000000012141224151530700150500ustar00rootroot00000000000000#include #include #include "tap-interface.h" #include "logging.h" unsigned tap_log_messages; const char *log_prefix = ""; char *log_last = NULL; bool suppress_logging; union ntdb_attribute tap_log_attr = { .log = { .base = { .attr = NTDB_ATTRIBUTE_LOG }, .fn = tap_log_fn } }; void tap_log_fn(struct ntdb_context *ntdb, enum ntdb_log_level level, enum NTDB_ERROR ecode, const char *message, void *priv) { if (suppress_logging) return; diag("ntdb log level %u: %s: %s%s", level, ntdb_errorstr(ecode), log_prefix, message); if (log_last) free(log_last); log_last = strdup(message); tap_log_messages++; } ntdb-1.0/test/logging.h000066400000000000000000000006731224151530700150650ustar00rootroot00000000000000#ifndef NTDB_TEST_LOGGING_H #define NTDB_TEST_LOGGING_H #include "ntdb.h" #include #include extern bool suppress_logging; extern const char *log_prefix; extern unsigned tap_log_messages; extern union ntdb_attribute tap_log_attr; extern char *log_last; void tap_log_fn(struct ntdb_context *ntdb, enum ntdb_log_level level, enum NTDB_ERROR ecode, const char *message, void *priv); #endif /* NTDB_TEST_LOGGING_H */ ntdb-1.0/test/no-fsync.h000066400000000000000000000003161224151530700151650ustar00rootroot00000000000000#ifndef NTDB_NO_FSYNC_H #define NTDB_NO_FSYNC_H /* Obey $TDB_NO_FSYNC, a bit like tdb does (only note our NTDB_NOSYNC * does less) */ #define MAYBE_NOSYNC (getenv("TDB_NO_FSYNC") ? 
NTDB_NOSYNC : 0) #endif ntdb-1.0/test/ntdb-source.h000066400000000000000000000003251224151530700156560ustar00rootroot00000000000000#include "config.h" #include "check.c" #include "free.c" #include "hash.c" #include "io.c" #include "lock.c" #include "open.c" #include "summary.c" #include "ntdb.c" #include "transaction.c" #include "traverse.c" ntdb-1.0/test/run-001-encode.c000066400000000000000000000021701224151530700157610ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" #include "logging.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_used_record rec; struct ntdb_context ntdb = { .log_fn = tap_log_fn }; plan_tests(64 + 32 + 48*5 + 1); /* We should be able to encode any data value. */ for (i = 0; i < 64; i++) ok1(set_header(&ntdb, &rec, NTDB_USED_MAGIC, 0, 1ULL << i, 1ULL << i) == 0); /* And any key and data with < 64 bits between them. */ for (i = 0; i < 32; i++) { ntdb_len_t dlen = 1ULL >> (63 - i), klen = 1ULL << i; ok1(set_header(&ntdb, &rec, NTDB_USED_MAGIC, klen, dlen, klen + dlen) == 0); } /* We should neatly encode all values. */ for (i = 0; i < 48; i++) { uint64_t klen = 1ULL << (i < 16 ? i : 15); uint64_t dlen = 1ULL << i; uint64_t xlen = 1ULL << (i < 32 ? 
i : 31); ok1(set_header(&ntdb, &rec, NTDB_USED_MAGIC, klen, dlen, klen+dlen+xlen) == 0); ok1(rec_key_length(&rec) == klen); ok1(rec_data_length(&rec) == dlen); ok1(rec_extra_padding(&rec) == xlen); ok1(rec_magic(&rec) == NTDB_USED_MAGIC); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/run-001-fls.c000066400000000000000000000010751224151530700153130ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" static unsigned int dumb_fls(uint64_t num) { int i; for (i = 63; i >= 0; i--) { if (num & (1ULL << i)) break; } return i + 1; } int main(int argc, char *argv[]) { unsigned int i, j; plan_tests(64 * 64 + 2); ok1(fls64(0) == 0); ok1(dumb_fls(0) == 0); for (i = 0; i < 64; i++) { for (j = 0; j < 64; j++) { uint64_t val = (1ULL << i) | (1ULL << j); ok(fls64(val) == dumb_fls(val), "%llu -> %u should be %u", (long long)val, fls64(val), dumb_fls(val)); } } return exit_status(); } ntdb-1.0/test/run-01-new_database.c000066400000000000000000000017321224151530700170640ustar00rootroot00000000000000#include #include "ntdb-source.h" #include "tap-interface.h" #include #include "logging.h" #include "failtest_helper.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; failtest_init(argc, argv); failtest_hook = block_repeat_failures; failtest_exit_check = exit_check_log; plan_tests(sizeof(flags) / sizeof(flags[0]) * 3); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-new_database.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); if (!ok1(ntdb)) failtest_exit(exit_status()); failtest_suppress = true; ok1(ntdb_check(ntdb, NULL, NULL) == 0); failtest_suppress = false; ntdb_close(ntdb); if (!ok1(tap_log_messages == 0)) break; } failtest_exit(exit_status()); } 
ntdb-1.0/test/run-02-expand.c000066400000000000000000000032751224151530700157330ustar00rootroot00000000000000#include #include "ntdb-source.h" #include "tap-interface.h" #include #include "logging.h" #include "failtest_helper.h" int main(int argc, char *argv[]) { unsigned int i; uint64_t val; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; plan_tests(sizeof(flags) / sizeof(flags[0]) * 11 + 1); failtest_init(argc, argv); failtest_hook = block_repeat_failures; failtest_exit_check = exit_check_log; for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { failtest_suppress = true; ntdb = ntdb_open("run-expand.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); if (!ok1(ntdb)) break; val = ntdb->file->map_size; /* Need some hash lock for expand. */ ok1(ntdb_lock_hash(ntdb, 0, F_WRLCK) == 0); failtest_suppress = false; if (!ok1(ntdb_expand(ntdb, 1) == 0)) { failtest_suppress = true; ntdb_close(ntdb); break; } failtest_suppress = true; ok1(ntdb->file->map_size >= val + 1 * NTDB_EXTENSION_FACTOR); ok1(ntdb_unlock_hash(ntdb, 0, F_WRLCK) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); val = ntdb->file->map_size; ok1(ntdb_lock_hash(ntdb, 0, F_WRLCK) == 0); failtest_suppress = false; if (!ok1(ntdb_expand(ntdb, 1024) == 0)) { failtest_suppress = true; ntdb_close(ntdb); break; } failtest_suppress = true; ok1(ntdb_unlock_hash(ntdb, 0, F_WRLCK) == 0); ok1(ntdb->file->map_size >= val + 1024 * NTDB_EXTENSION_FACTOR); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ntdb_close(ntdb); } ok1(tap_log_messages == 0); failtest_exit(exit_status()); } ntdb-1.0/test/run-03-coalesce.c000066400000000000000000000153611224151530700162320ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" #include "logging.h" #include "layout.h" static ntdb_len_t free_record_length(struct ntdb_context *ntdb, ntdb_off_t off) { struct ntdb_free_record f; enum 
NTDB_ERROR ecode; ecode = ntdb_read_convert(ntdb, off, &f, sizeof(f)); if (ecode != NTDB_SUCCESS) return ecode; if (frec_magic(&f) != NTDB_FREE_MAGIC) return NTDB_ERR_CORRUPT; return frec_len(&f); } int main(int argc, char *argv[]) { ntdb_off_t b_off, test; struct ntdb_context *ntdb; struct ntdb_layout *layout; NTDB_DATA data, key; ntdb_len_t len; /* FIXME: Test NTDB_CONVERT */ /* FIXME: Test lock order fail. */ plan_tests(42); data = ntdb_mkdata("world", 5); key = ntdb_mkdata("hello", 5); /* No coalescing can be done due to EOF */ layout = new_ntdb_layout(); ntdb_layout_add_freetable(layout); len = 15560; ntdb_layout_add_free(layout, len, 0); ntdb_layout_write(layout, free, &tap_log_attr, "run-03-coalesce.ntdb"); /* NOMMAP is for lockcheck. */ ntdb = ntdb_open("run-03-coalesce.ntdb", NTDB_NOMMAP|MAYBE_NOSYNC, O_RDWR, 0, &tap_log_attr); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ok1(free_record_length(ntdb, layout->elem[1].base.off) == len); /* Figure out which bucket free entry is. */ b_off = bucket_off(ntdb->ftable_off, size_to_bucket(len)); /* Lock and fail to coalesce. */ ok1(ntdb_lock_free_bucket(ntdb, b_off, NTDB_LOCK_WAIT) == 0); test = layout->elem[1].base.off; ok1(coalesce(ntdb, layout->elem[1].base.off, b_off, len, &test) == 0); ntdb_unlock_free_bucket(ntdb, b_off); ok1(free_record_length(ntdb, layout->elem[1].base.off) == len); ok1(test == layout->elem[1].base.off); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ntdb_close(ntdb); ntdb_layout_free(layout); /* No coalescing can be done due to used record */ layout = new_ntdb_layout(); ntdb_layout_add_freetable(layout); ntdb_layout_add_free(layout, 15528, 0); ntdb_layout_add_used(layout, key, data, 6); ntdb_layout_write(layout, free, &tap_log_attr, "run-03-coalesce.ntdb"); /* NOMMAP is for lockcheck. 
*/ ntdb = ntdb_open("run-03-coalesce.ntdb", NTDB_NOMMAP|MAYBE_NOSYNC, O_RDWR, 0, &tap_log_attr); ok1(free_record_length(ntdb, layout->elem[1].base.off) == 15528); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Figure out which bucket free entry is. */ b_off = bucket_off(ntdb->ftable_off, size_to_bucket(15528)); /* Lock and fail to coalesce. */ ok1(ntdb_lock_free_bucket(ntdb, b_off, NTDB_LOCK_WAIT) == 0); test = layout->elem[1].base.off; ok1(coalesce(ntdb, layout->elem[1].base.off, b_off, 15528, &test) == 0); ntdb_unlock_free_bucket(ntdb, b_off); ok1(free_record_length(ntdb, layout->elem[1].base.off) == 15528); ok1(test == layout->elem[1].base.off); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ntdb_close(ntdb); ntdb_layout_free(layout); /* Coalescing can be done due to two free records, then EOF */ layout = new_ntdb_layout(); ntdb_layout_add_freetable(layout); ntdb_layout_add_free(layout, 1024, 0); ntdb_layout_add_free(layout, 14520, 0); ntdb_layout_write(layout, free, &tap_log_attr, "run-03-coalesce.ntdb"); /* NOMMAP is for lockcheck. */ ntdb = ntdb_open("run-03-coalesce.ntdb", NTDB_NOMMAP|MAYBE_NOSYNC, O_RDWR, 0, &tap_log_attr); ok1(free_record_length(ntdb, layout->elem[1].base.off) == 1024); ok1(free_record_length(ntdb, layout->elem[2].base.off) == 14520); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Figure out which bucket (first) free entry is. */ b_off = bucket_off(ntdb->ftable_off, size_to_bucket(1024)); /* Lock and coalesce. */ ok1(ntdb_lock_free_bucket(ntdb, b_off, NTDB_LOCK_WAIT) == 0); test = layout->elem[2].base.off; ok1(coalesce(ntdb, layout->elem[1].base.off, b_off, 1024, &test) == 1024 + sizeof(struct ntdb_used_record) + 14520); /* Should tell us it's erased this one... 
*/ ok1(test == NTDB_ERR_NOEXIST); ok1(ntdb->file->allrecord_lock.count == 0 && ntdb->file->num_lockrecs == 0); ok1(free_record_length(ntdb, layout->elem[1].base.off) == 1024 + sizeof(struct ntdb_used_record) + 14520); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ntdb_close(ntdb); ntdb_layout_free(layout); /* Coalescing can be done due to two free records, then data */ layout = new_ntdb_layout(); ntdb_layout_add_freetable(layout); ntdb_layout_add_free(layout, 1024, 0); ntdb_layout_add_free(layout, 14488, 0); ntdb_layout_add_used(layout, key, data, 6); ntdb_layout_write(layout, free, &tap_log_attr, "run-03-coalesce.ntdb"); /* NOMMAP is for lockcheck. */ ntdb = ntdb_open("run-03-coalesce.ntdb", NTDB_NOMMAP|MAYBE_NOSYNC, O_RDWR, 0, &tap_log_attr); ok1(free_record_length(ntdb, layout->elem[1].base.off) == 1024); ok1(free_record_length(ntdb, layout->elem[2].base.off) == 14488); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Figure out which bucket free entry is. */ b_off = bucket_off(ntdb->ftable_off, size_to_bucket(1024)); /* Lock and coalesce. */ ok1(ntdb_lock_free_bucket(ntdb, b_off, NTDB_LOCK_WAIT) == 0); test = layout->elem[2].base.off; ok1(coalesce(ntdb, layout->elem[1].base.off, b_off, 1024, &test) == 1024 + sizeof(struct ntdb_used_record) + 14488); ok1(ntdb->file->allrecord_lock.count == 0 && ntdb->file->num_lockrecs == 0); ok1(free_record_length(ntdb, layout->elem[1].base.off) == 1024 + sizeof(struct ntdb_used_record) + 14488); ok1(test == NTDB_ERR_NOEXIST); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ntdb_close(ntdb); ntdb_layout_free(layout); /* Coalescing can be done due to three free records, then EOF */ layout = new_ntdb_layout(); ntdb_layout_add_freetable(layout); ntdb_layout_add_free(layout, 1024, 0); ntdb_layout_add_free(layout, 512, 0); ntdb_layout_add_free(layout, 13992, 0); ntdb_layout_write(layout, free, &tap_log_attr, "run-03-coalesce.ntdb"); /* NOMMAP is for lockcheck. 
*/ ntdb = ntdb_open("run-03-coalesce.ntdb", NTDB_NOMMAP|MAYBE_NOSYNC, O_RDWR, 0, &tap_log_attr); ok1(free_record_length(ntdb, layout->elem[1].base.off) == 1024); ok1(free_record_length(ntdb, layout->elem[2].base.off) == 512); ok1(free_record_length(ntdb, layout->elem[3].base.off) == 13992); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Figure out which bucket free entry is. */ b_off = bucket_off(ntdb->ftable_off, size_to_bucket(1024)); /* Lock and coalesce. */ ok1(ntdb_lock_free_bucket(ntdb, b_off, NTDB_LOCK_WAIT) == 0); test = layout->elem[2].base.off; ok1(coalesce(ntdb, layout->elem[1].base.off, b_off, 1024, &test) == 1024 + sizeof(struct ntdb_used_record) + 512 + sizeof(struct ntdb_used_record) + 13992); ok1(ntdb->file->allrecord_lock.count == 0 && ntdb->file->num_lockrecs == 0); ok1(free_record_length(ntdb, layout->elem[1].base.off) == 1024 + sizeof(struct ntdb_used_record) + 512 + sizeof(struct ntdb_used_record) + 13992); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ntdb_close(ntdb); ntdb_layout_free(layout); ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/run-04-basichash.c000066400000000000000000000261001224151530700163730ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" #include "logging.h" /* We rig the hash so all records clash. 
*/ static uint32_t clash(const void *key, size_t len, uint32_t seed, void *priv) { return *((const unsigned int *)key) << 20; } int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; unsigned int v; struct ntdb_used_record rec; NTDB_DATA key = { (unsigned char *)&v, sizeof(v) }; NTDB_DATA dbuf = { (unsigned char *)&v, sizeof(v) }; union ntdb_attribute hattr = { .hash = { .base = { NTDB_ATTRIBUTE_HASH }, .fn = clash } }; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT, }; hattr.base.next = &tap_log_attr; plan_tests(sizeof(flags) / sizeof(flags[0]) * 137 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { struct hash_info h; ntdb_off_t new_off, new_off2, off; ntdb = ntdb_open("run-04-basichash.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &hattr); ok1(ntdb); if (!ntdb) continue; v = 0; /* Should not find it. */ ok1(find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL) == 0); /* Should have created correct hash. */ ok1(h.h == ntdb_hash(ntdb, key.dptr, key.dsize)); /* Should have located space in top table, bucket 0. */ ok1(h.table == NTDB_HASH_OFFSET); ok1(h.table_size == (1 << ntdb->hash_bits)); ok1(h.bucket == 0); ok1(h.old_val == 0); /* Should have lock on bucket 0 */ ok1(h.h == 0); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->num_lockrecs == 1); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->lockrecs[0].off == NTDB_HASH_LOCK_START); /* FIXME: Check lock length */ /* Allocate a new record. */ new_off = alloc(ntdb, key.dsize, dbuf.dsize, NTDB_USED_MAGIC, false); ok1(!NTDB_OFF_IS_ERR(new_off)); /* We should be able to add it now. */ ok1(add_to_hash(ntdb, &h, new_off) == 0); /* Make sure we fill it in for later finding. */ off = new_off + sizeof(struct ntdb_used_record); ok1(!ntdb->io->twrite(ntdb, off, key.dptr, key.dsize)); off += key.dsize; ok1(!ntdb->io->twrite(ntdb, off, dbuf.dptr, dbuf.dsize)); /* We should be able to unlock that OK. 
*/ ok1(ntdb_unlock_hash(ntdb, h.h, F_WRLCK) == 0); /* Database should be consistent. */ ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Now, this should give a successful lookup. */ ok1(find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL) == new_off); /* Should have created correct hash. */ ok1(h.h == ntdb_hash(ntdb, key.dptr, key.dsize)); /* Should have located it in top table, bucket 0. */ ok1(h.table == NTDB_HASH_OFFSET); ok1(h.table_size == (1 << ntdb->hash_bits)); ok1(h.bucket == 0); /* Should have lock on bucket 0 */ ok1(h.h == 0); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->num_lockrecs == 1); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->lockrecs[0].off == NTDB_HASH_LOCK_START); /* FIXME: Check lock length */ ok1(ntdb_unlock_hash(ntdb, h.h, F_WRLCK) == 0); /* Database should be consistent. */ ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Test expansion. */ v = 1; ok1(find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL) == 0); /* Should have created correct hash. */ ok1(h.h == ntdb_hash(ntdb, key.dptr, key.dsize)); /* Should have located clash in toplevel bucket 0. */ ok1(h.table == NTDB_HASH_OFFSET); ok1(h.table_size == (1 << ntdb->hash_bits)); ok1(h.bucket == 0); ok1((h.old_val & NTDB_OFF_MASK) == new_off); /* Should have lock on bucket 0 */ ok1((h.h & ((1 << ntdb->hash_bits)-1)) == 0); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->num_lockrecs == 1); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->lockrecs[0].off == NTDB_HASH_LOCK_START); /* FIXME: Check lock length */ new_off2 = alloc(ntdb, key.dsize, dbuf.dsize, NTDB_USED_MAGIC, false); ok1(!NTDB_OFF_IS_ERR(new_off2)); off = new_off2 + sizeof(struct ntdb_used_record); ok1(!ntdb->io->twrite(ntdb, off, key.dptr, key.dsize)); off += key.dsize; ok1(!ntdb->io->twrite(ntdb, off, dbuf.dptr, dbuf.dsize)); /* We should be able to add it now. */ ok1(add_to_hash(ntdb, &h, new_off2) == 0); ok1(ntdb_unlock_hash(ntdb, h.h, F_WRLCK) == 0); /* Should be happy with expansion. 
*/ ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Should be able to find both. */ v = 1; ok1(find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL) == new_off2); /* Should have created correct hash. */ ok1(h.h == ntdb_hash(ntdb, key.dptr, key.dsize)); /* Should have located space in chain. */ ok1(h.table > NTDB_HASH_OFFSET); ok1(h.table_size == 2); ok1(h.bucket == 1); /* Should have lock on bucket 0 */ ok1((h.h & ((1 << ntdb->hash_bits)-1)) == 0); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->num_lockrecs == 1); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->lockrecs[0].off == NTDB_HASH_LOCK_START); ok1(ntdb_unlock_hash(ntdb, h.h, F_WRLCK) == 0); v = 0; ok1(find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL) == new_off); /* Should have created correct hash. */ ok1(h.h == ntdb_hash(ntdb, key.dptr, key.dsize)); /* Should have located space in chain. */ ok1(h.table > NTDB_HASH_OFFSET); ok1(h.table_size == 2); ok1(h.bucket == 0); /* Should have lock on bucket 0 */ ok1((h.h & ((1 << ntdb->hash_bits)-1)) == 0); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->num_lockrecs == 1); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->lockrecs[0].off == NTDB_HASH_LOCK_START); /* FIXME: Check lock length */ /* Simple delete should work. */ ok1(delete_from_hash(ntdb, &h) == 0); ok1(add_free_record(ntdb, new_off, sizeof(struct ntdb_used_record) + rec_key_length(&rec) + rec_data_length(&rec) + rec_extra_padding(&rec), NTDB_LOCK_NOWAIT, false) == 0); ok1(ntdb_unlock_hash(ntdb, h.h, F_WRLCK) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Should still be able to find other record. */ v = 1; ok1(find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL) == new_off2); /* Should have created correct hash. */ ok1(h.h == ntdb_hash(ntdb, key.dptr, key.dsize)); /* Should have located space in chain. 
*/ ok1(h.table > NTDB_HASH_OFFSET); ok1(h.table_size == 2); ok1(h.bucket == 1); /* Should have lock on bucket 0 */ ok1((h.h & ((1 << ntdb->hash_bits)-1)) == 0); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->num_lockrecs == 1); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->lockrecs[0].off == NTDB_HASH_LOCK_START); ok1(ntdb_unlock_hash(ntdb, h.h, F_WRLCK) == 0); /* Now should find empty space. */ v = 0; ok1(find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL) == 0); /* Should have created correct hash. */ ok1(h.h == ntdb_hash(ntdb, key.dptr, key.dsize)); /* Should have located space in chain, bucket 0. */ ok1(h.table > NTDB_HASH_OFFSET); ok1(h.table_size == 2); ok1(h.bucket == 0); ok1(h.old_val == 0); /* Adding another record should work. */ v = 2; ok1(find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL) == 0); /* Should have created correct hash. */ ok1(h.h == ntdb_hash(ntdb, key.dptr, key.dsize)); /* Should have located space in chain, bucket 0. */ ok1(h.table > NTDB_HASH_OFFSET); ok1(h.table_size == 2); ok1(h.bucket == 0); ok1(h.old_val == 0); /* Should have lock on bucket 0 */ ok1((h.h & ((1 << ntdb->hash_bits)-1)) == 0); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->num_lockrecs == 1); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->lockrecs[0].off == NTDB_HASH_LOCK_START); new_off = alloc(ntdb, key.dsize, dbuf.dsize, NTDB_USED_MAGIC, false); ok1(!NTDB_OFF_IS_ERR(new_off2)); ok1(add_to_hash(ntdb, &h, new_off) == 0); ok1(ntdb_unlock_hash(ntdb, h.h, F_WRLCK) == 0); off = new_off + sizeof(struct ntdb_used_record); ok1(!ntdb->io->twrite(ntdb, off, key.dptr, key.dsize)); off += key.dsize; ok1(!ntdb->io->twrite(ntdb, off, dbuf.dptr, dbuf.dsize)); /* Adding another record should cause expansion. */ v = 3; ok1(find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL) == 0); /* Should have created correct hash. */ ok1(h.h == ntdb_hash(ntdb, key.dptr, key.dsize)); /* Should not have located space in chain. 
*/ ok1(h.table > NTDB_HASH_OFFSET); ok1(h.table_size == 2); ok1(h.bucket == 2); ok1(h.old_val != 0); /* Should have lock on bucket 0 */ ok1((h.h & ((1 << ntdb->hash_bits)-1)) == 0); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->num_lockrecs == 1); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->lockrecs[0].off == NTDB_HASH_LOCK_START); new_off = alloc(ntdb, key.dsize, dbuf.dsize, NTDB_USED_MAGIC, false); ok1(!NTDB_OFF_IS_ERR(new_off2)); off = new_off + sizeof(struct ntdb_used_record); ok1(!ntdb->io->twrite(ntdb, off, key.dptr, key.dsize)); off += key.dsize; ok1(!ntdb->io->twrite(ntdb, off, dbuf.dptr, dbuf.dsize)); ok1(add_to_hash(ntdb, &h, new_off) == 0); ok1(ntdb_unlock_hash(ntdb, h.h, F_WRLCK) == 0); /* Retrieve it and check. */ ok1(find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL) == new_off); /* Should have created correct hash. */ ok1(h.h == ntdb_hash(ntdb, key.dptr, key.dsize)); /* Should have appended to chain, bucket 2. */ ok1(h.table > NTDB_HASH_OFFSET); ok1(h.table_size == 3); ok1(h.bucket == 2); /* Should have lock on bucket 0 */ ok1((h.h & ((1 << ntdb->hash_bits)-1)) == 0); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->num_lockrecs == 1); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->lockrecs[0].off == NTDB_HASH_LOCK_START); ok1(ntdb_unlock_hash(ntdb, h.h, F_WRLCK) == 0); /* YA record: relocation. */ v = 4; ok1(find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL) == 0); /* Should have created correct hash. */ ok1(h.h == ntdb_hash(ntdb, key.dptr, key.dsize)); /* Should not have located space in chain. 
*/ ok1(h.table > NTDB_HASH_OFFSET); ok1(h.table_size == 3); ok1(h.bucket == 3); ok1(h.old_val != 0); /* Should have lock on bucket 0 */ ok1((h.h & ((1 << ntdb->hash_bits)-1)) == 0); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->num_lockrecs == 1); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->lockrecs[0].off == NTDB_HASH_LOCK_START); new_off = alloc(ntdb, key.dsize, dbuf.dsize, NTDB_USED_MAGIC, false); ok1(!NTDB_OFF_IS_ERR(new_off2)); off = new_off + sizeof(struct ntdb_used_record); ok1(!ntdb->io->twrite(ntdb, off, key.dptr, key.dsize)); off += key.dsize; ok1(!ntdb->io->twrite(ntdb, off, dbuf.dptr, dbuf.dsize)); ok1(add_to_hash(ntdb, &h, new_off) == 0); ok1(ntdb_unlock_hash(ntdb, h.h, F_WRLCK) == 0); /* Retrieve it and check. */ ok1(find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL) == new_off); /* Should have created correct hash. */ ok1(h.h == ntdb_hash(ntdb, key.dptr, key.dsize)); /* Should have appended to chain, bucket 2. */ ok1(h.table > NTDB_HASH_OFFSET); ok1(h.table_size == 4); ok1(h.bucket == 3); /* Should have lock on bucket 0 */ ok1((h.h & ((1 << ntdb->hash_bits)-1)) == 0); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->num_lockrecs == 1); ok1((ntdb->flags & NTDB_NOLOCK) || ntdb->file->lockrecs[0].off == NTDB_HASH_LOCK_START); ok1(ntdb_unlock_hash(ntdb, h.h, F_WRLCK) == 0); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/run-05-readonly-open.c000066400000000000000000000041041224151530700172230ustar00rootroot00000000000000#include #include "ntdb-source.h" #include "tap-interface.h" #include #include "logging.h" #include "failtest_helper.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; NTDB_DATA key = ntdb_mkdata("key", 3); NTDB_DATA data = ntdb_mkdata("data", 4), d; union ntdb_attribute seed_attr; unsigned int msgs = 0; failtest_init(argc, argv); failtest_hook = block_repeat_failures; 
failtest_exit_check = exit_check_log; seed_attr.base.attr = NTDB_ATTRIBUTE_SEED; seed_attr.base.next = &tap_log_attr; seed_attr.seed.seed = 0; failtest_suppress = true; plan_tests(sizeof(flags) / sizeof(flags[0]) * 11); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-05-readonly-open.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &seed_attr); ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ntdb_close(ntdb); failtest_suppress = false; ntdb = ntdb_open("run-05-readonly-open.ntdb", flags[i]|MAYBE_NOSYNC, O_RDONLY, 0600, &tap_log_attr); if (!ok1(ntdb)) break; ok1(tap_log_messages == msgs); /* Fetch should succeed, stores should fail. */ if (!ok1(ntdb_fetch(ntdb, key, &d) == 0)) goto fail; ok1(ntdb_deq(d, data)); free(d.dptr); if (!ok1(ntdb_store(ntdb, key, data, NTDB_MODIFY) == NTDB_ERR_RDONLY)) goto fail; ok1(tap_log_messages == ++msgs); if (!ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == NTDB_ERR_RDONLY)) goto fail; ok1(tap_log_messages == ++msgs); failtest_suppress = true; ok1(ntdb_check(ntdb, NULL, NULL) == 0); ntdb_close(ntdb); ok1(tap_log_messages == msgs); /* SIGH: failtest bug, it doesn't save the ntdb file because * we have it read-only. If we go around again, it gets * changed underneath us and things get screwy. 
*/ if (failtest_has_failed()) break; } failtest_exit(exit_status()); fail: failtest_suppress = true; ntdb_close(ntdb); failtest_exit(exit_status()); } ntdb-1.0/test/run-10-simple-store.c000066400000000000000000000032441224151530700170720ustar00rootroot00000000000000#include #include "ntdb-source.h" #include "tap-interface.h" #include #include "logging.h" #include "failtest_helper.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; NTDB_DATA key = ntdb_mkdata("key", 3); NTDB_DATA data = ntdb_mkdata("data", 4); failtest_init(argc, argv); failtest_hook = block_repeat_failures; failtest_exit_check = exit_check_log; failtest_suppress = true; plan_tests(sizeof(flags) / sizeof(flags[0]) * 7 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-10-simple-store.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); if (!ok1(ntdb)) break; /* Modify should fail. */ failtest_suppress = false; if (!ok1(ntdb_store(ntdb, key, data, NTDB_MODIFY) == NTDB_ERR_NOEXIST)) goto fail; failtest_suppress = true; ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Insert should succeed. */ failtest_suppress = false; if (!ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0)) goto fail; failtest_suppress = true; ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Second insert should fail. 
*/ failtest_suppress = false; if (!ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == NTDB_ERR_EXISTS)) goto fail; failtest_suppress = true; ok1(ntdb_check(ntdb, NULL, NULL) == 0); ntdb_close(ntdb); } ok1(tap_log_messages == 0); failtest_exit(exit_status()); fail: failtest_suppress = true; ntdb_close(ntdb); failtest_exit(exit_status()); } ntdb-1.0/test/run-11-simple-fetch.c000066400000000000000000000032401224151530700170240ustar00rootroot00000000000000#include #include "ntdb-source.h" #include "tap-interface.h" #include #include "logging.h" #include "failtest_helper.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; NTDB_DATA key = ntdb_mkdata("key", 3); NTDB_DATA data = ntdb_mkdata("data", 4); failtest_init(argc, argv); failtest_hook = block_repeat_failures; failtest_exit_check = exit_check_log; failtest_suppress = true; plan_tests(sizeof(flags) / sizeof(flags[0]) * 8 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-11-simple-fetch.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (ntdb) { NTDB_DATA d = { NULL, 0 }; /* Bogus GCC warning */ /* fetch should fail. */ failtest_suppress = false; if (!ok1(ntdb_fetch(ntdb, key, &d) == NTDB_ERR_NOEXIST)) goto fail; failtest_suppress = true; ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Insert should succeed. */ ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Fetch should now work. 
*/ failtest_suppress = false; if (!ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS)) goto fail; failtest_suppress = true; ok1(ntdb_deq(d, data)); free(d.dptr); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ntdb_close(ntdb); } } ok1(tap_log_messages == 0); failtest_exit(exit_status()); fail: failtest_suppress = true; ntdb_close(ntdb); failtest_exit(exit_status()); } ntdb-1.0/test/run-12-check.c000066400000000000000000000023131224151530700155220ustar00rootroot00000000000000#include "private.h" #include #include "ntdb-source.h" #include "tap-interface.h" #include #include "logging.h" #include "failtest_helper.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT }; NTDB_DATA key = ntdb_mkdata("key", 3); NTDB_DATA data = ntdb_mkdata("data", 4); failtest_init(argc, argv); failtest_hook = block_repeat_failures; failtest_exit_check = exit_check_log; failtest_suppress = true; plan_tests(sizeof(flags) / sizeof(flags[0]) * 3 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-12-check.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); /* This is what we really want to test: ntdb_check(). 
*/ failtest_suppress = false; if (!ok1(ntdb_check(ntdb, NULL, NULL) == 0)) goto fail; failtest_suppress = true; ntdb_close(ntdb); } ok1(tap_log_messages == 0); failtest_exit(exit_status()); fail: failtest_suppress = true; ntdb_close(ntdb); failtest_exit(exit_status()); } ntdb-1.0/test/run-15-append.c000066400000000000000000000070171224151530700157250ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" #include #include "logging.h" #define MAX_SIZE 13100 #define SIZE_STEP 131 static ntdb_off_t ntdb_offset(struct ntdb_context *ntdb, NTDB_DATA key) { ntdb_off_t off; struct ntdb_used_record urec; struct hash_info h; off = find_and_lock(ntdb, key, F_RDLCK, &h, &urec, NULL); if (NTDB_OFF_IS_ERR(off)) return 0; ntdb_unlock_hash(ntdb, h.h, F_RDLCK); return off; } int main(int argc, char *argv[]) { unsigned int i, j, moves; struct ntdb_context *ntdb; unsigned char *buffer; ntdb_off_t oldoff = 0, newoff; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; NTDB_DATA key = ntdb_mkdata("key", 3); NTDB_DATA data; buffer = malloc(MAX_SIZE); for (i = 0; i < MAX_SIZE; i++) buffer[i] = i; plan_tests(sizeof(flags) / sizeof(flags[0]) * ((3 + MAX_SIZE/SIZE_STEP * 5) * 2 + 7) + 1); /* Using ntdb_store. 
*/ for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-append.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; moves = 0; for (j = 0; j < MAX_SIZE; j += SIZE_STEP) { data.dptr = buffer; data.dsize = j; ok1(ntdb_store(ntdb, key, data, NTDB_REPLACE) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ok1(ntdb_fetch(ntdb, key, &data) == NTDB_SUCCESS); ok1(data.dsize == j); ok1(memcmp(data.dptr, buffer, data.dsize) == 0); free(data.dptr); newoff = ntdb_offset(ntdb, key); if (newoff != oldoff) moves++; oldoff = newoff; } ok1(!ntdb->file || (ntdb->file->allrecord_lock.count == 0 && ntdb->file->num_lockrecs == 0)); /* We should increase by 50% each time... */ ok(moves <= ilog64(j / SIZE_STEP)*2, "Moved %u times", moves); ntdb_close(ntdb); } /* Using ntdb_append. */ for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { size_t prev_len = 0; ntdb = ntdb_open("run-append.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; moves = 0; for (j = 0; j < MAX_SIZE; j += SIZE_STEP) { data.dptr = buffer + prev_len; data.dsize = j - prev_len; ok1(ntdb_append(ntdb, key, data) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ok1(ntdb_fetch(ntdb, key, &data) == NTDB_SUCCESS); ok1(data.dsize == j); ok1(memcmp(data.dptr, buffer, data.dsize) == 0); free(data.dptr); prev_len = data.dsize; newoff = ntdb_offset(ntdb, key); if (newoff != oldoff) moves++; oldoff = newoff; } ok1(!ntdb->file || (ntdb->file->allrecord_lock.count == 0 && ntdb->file->num_lockrecs == 0)); /* We should increase by 50% each time... */ ok(moves <= ilog64(j / SIZE_STEP)*2, "Moved %u times", moves); ntdb_close(ntdb); } for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-append.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; /* Huge initial store. 
*/ data.dptr = buffer; data.dsize = MAX_SIZE; ok1(ntdb_append(ntdb, key, data) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ok1(ntdb_fetch(ntdb, key, &data) == NTDB_SUCCESS); ok1(data.dsize == MAX_SIZE); ok1(memcmp(data.dptr, buffer, data.dsize) == 0); free(data.dptr); ok1(!ntdb->file || (ntdb->file->allrecord_lock.count == 0 && ntdb->file->num_lockrecs == 0)); ntdb_close(ntdb); } ok1(tap_log_messages == 0); free(buffer); return exit_status(); } ntdb-1.0/test/run-25-hashoverload.c000066400000000000000000000046031224151530700171340ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" #include "logging.h" #define OVERLOAD 100 static uint32_t badhash(const void *key, size_t len, uint32_t seed, void *priv) { return 0; } static int trav(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA dbuf, void *p) { if (p) return ntdb_delete(ntdb, key); return 0; } int main(int argc, char *argv[]) { unsigned int i, j; struct ntdb_context *ntdb; NTDB_DATA key = { (unsigned char *)&j, sizeof(j) }; NTDB_DATA dbuf = { (unsigned char *)&j, sizeof(j) }; union ntdb_attribute hattr = { .hash = { .base = { NTDB_ATTRIBUTE_HASH }, .fn = badhash } }; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT, }; hattr.base.next = &tap_log_attr; plan_tests(sizeof(flags) / sizeof(flags[0]) * (7 * OVERLOAD + 11) + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { NTDB_DATA d = { NULL, 0 }; /* Bogus GCC warning */ ntdb = ntdb_open("run-25-hashoverload.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &hattr); ok1(ntdb); if (!ntdb) continue; /* Overload a bucket. */ for (j = 0; j < OVERLOAD; j++) { ok1(ntdb_store(ntdb, key, dbuf, NTDB_INSERT) == 0); } ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Check we can find them all. 
*/ for (j = 0; j < OVERLOAD; j++) { ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(d.dsize == sizeof(j)); ok1(d.dptr != NULL); ok1(d.dptr && memcmp(d.dptr, &j, d.dsize) == 0); free(d.dptr); } /* Traverse through them. */ ok1(ntdb_traverse(ntdb, trav, NULL) == OVERLOAD); /* Delete the first 99. */ for (j = 0; j < OVERLOAD-1; j++) ok1(ntdb_delete(ntdb, key) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ok1(ntdb_fetch(ntdb, key, &d) == NTDB_SUCCESS); ok1(d.dsize == sizeof(j)); ok1(d.dptr != NULL); ok1(d.dptr && memcmp(d.dptr, &j, d.dsize) == 0); free(d.dptr); /* Traverse through them. */ ok1(ntdb_traverse(ntdb, trav, NULL) == 1); /* Re-add */ for (j = 0; j < OVERLOAD-1; j++) { ok1(ntdb_store(ntdb, key, dbuf, NTDB_INSERT) == 0); } ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Now try deleting as we go. */ ok1(ntdb_traverse(ntdb, trav, trav) == OVERLOAD); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ok1(ntdb_traverse(ntdb, trav, NULL) == 0); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/run-30-exhaust-before-expand.c000066400000000000000000000036021224151530700206450ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" #include "logging.h" static bool empty_freetable(struct ntdb_context *ntdb) { struct ntdb_freetable ftab; unsigned int i; /* Now, free table should be completely exhausted in zone 0 */ if (ntdb_read_convert(ntdb, ntdb->ftable_off, &ftab, sizeof(ftab)) != 0) abort(); for (i = 0; i < sizeof(ftab.buckets)/sizeof(ftab.buckets[0]); i++) { if (ftab.buckets[i]) return false; } return true; } int main(int argc, char *argv[]) { unsigned int i, j; struct ntdb_context *ntdb; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; plan_tests(sizeof(flags) / sizeof(flags[0]) * 7 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { NTDB_DATA k, d; uint64_t size; bool was_empty = false; k.dptr = (void *)&j; k.dsize = 
sizeof(j); ntdb = ntdb_open("run-30-exhaust-before-expand.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* There's one empty record in initial db. */ ok1(!empty_freetable(ntdb)); size = ntdb->file->map_size; /* Create one record to chew up most space. */ d.dsize = size - NEW_DATABASE_HDR_SIZE(ntdb->hash_bits) - 32; d.dptr = calloc(d.dsize, 1); j = 0; ok1(ntdb_store(ntdb, k, d, NTDB_INSERT) == 0); ok1(ntdb->file->map_size == size); free(d.dptr); /* Now insert minimal-length records until we expand. */ for (j = 1; ntdb->file->map_size == size; j++) { was_empty = empty_freetable(ntdb); if (ntdb_store(ntdb, k, k, NTDB_INSERT) != 0) err(1, "Failed to store record %i", j); } /* Would have been empty before expansion, but no longer. */ ok1(was_empty); ok1(!empty_freetable(ntdb)); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/run-35-convert.c000066400000000000000000000034121224151530700161330ustar00rootroot00000000000000#include "private.h" #include #include "ntdb-source.h" #include "tap-interface.h" #include #include "logging.h" #include "failtest_helper.h" int main(int argc, char *argv[]) { unsigned int i, messages = 0; struct ntdb_context *ntdb; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; failtest_init(argc, argv); failtest_hook = block_repeat_failures; failtest_exit_check = exit_check_log; plan_tests(sizeof(flags) / sizeof(flags[0]) * 4); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-35-convert.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); if (!ok1(ntdb)) failtest_exit(exit_status()); ntdb_close(ntdb); /* We can fail in log message formatting or open. 
That's OK */ if (failtest_has_failed()) { failtest_exit(exit_status()); } /* If we say NTDB_CONVERT, it must be converted */ ntdb = ntdb_open("run-35-convert.ntdb", flags[i]|NTDB_CONVERT|MAYBE_NOSYNC, O_RDWR, 0600, &tap_log_attr); if (flags[i] & NTDB_CONVERT) { if (!ntdb) failtest_exit(exit_status()); ok1(ntdb_get_flags(ntdb) & NTDB_CONVERT); ntdb_close(ntdb); } else { if (!ok1(!ntdb && errno == EIO)) failtest_exit(exit_status()); ok1(tap_log_messages == ++messages); if (!ok1(log_last && strstr(log_last, "NTDB_CONVERT"))) failtest_exit(exit_status()); } /* If don't say NTDB_CONVERT, it *may* be converted */ ntdb = ntdb_open("run-35-convert.ntdb", (flags[i] & ~NTDB_CONVERT)|MAYBE_NOSYNC, O_RDWR, 0600, &tap_log_attr); if (!ntdb) failtest_exit(exit_status()); ok1(ntdb_get_flags(ntdb) == (flags[i]|MAYBE_NOSYNC)); ntdb_close(ntdb); } failtest_exit(exit_status()); } ntdb-1.0/test/run-50-multiple-freelists.c000066400000000000000000000040201224151530700202750ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" #include "logging.h" #include "layout.h" int main(int argc, char *argv[]) { ntdb_off_t off; struct ntdb_context *ntdb; struct ntdb_layout *layout; NTDB_DATA key, data; union ntdb_attribute seed; /* This seed value previously tickled a layout.c bug. */ seed.base.attr = NTDB_ATTRIBUTE_SEED; seed.seed.seed = 0xb1142bc054d035b4ULL; seed.base.next = &tap_log_attr; plan_tests(11); key = ntdb_mkdata("Hello", 5); data = ntdb_mkdata("world", 5); /* Create a NTDB with three free tables. */ layout = new_ntdb_layout(); ntdb_layout_add_freetable(layout); ntdb_layout_add_freetable(layout); ntdb_layout_add_freetable(layout); ntdb_layout_add_free(layout, 80, 0); /* Used record prevent coalescing. 
*/ ntdb_layout_add_used(layout, key, data, 6); ntdb_layout_add_free(layout, 160, 1); key.dsize--; ntdb_layout_add_used(layout, key, data, 7); ntdb_layout_add_free(layout, 320, 2); key.dsize--; ntdb_layout_add_used(layout, key, data, 8); ntdb_layout_add_free(layout, 40, 0); ntdb = ntdb_layout_get(layout, free, &seed); ok1(ntdb_check(ntdb, NULL, NULL) == 0); off = get_free(ntdb, 0, 80 - sizeof(struct ntdb_used_record), 0, NTDB_USED_MAGIC); ok1(off == layout->elem[3].base.off); ok1(ntdb->ftable_off == layout->elem[0].base.off); off = get_free(ntdb, 0, 160 - sizeof(struct ntdb_used_record), 0, NTDB_USED_MAGIC); ok1(off == layout->elem[5].base.off); ok1(ntdb->ftable_off == layout->elem[1].base.off); off = get_free(ntdb, 0, 320 - sizeof(struct ntdb_used_record), 0, NTDB_USED_MAGIC); ok1(off == layout->elem[7].base.off); ok1(ntdb->ftable_off == layout->elem[2].base.off); off = get_free(ntdb, 0, 40 - sizeof(struct ntdb_used_record), 0, NTDB_USED_MAGIC); ok1(off == layout->elem[9].base.off); ok1(ntdb->ftable_off == layout->elem[0].base.off); /* Now we fail. 
*/ off = get_free(ntdb, 0, 0, 1, NTDB_USED_MAGIC); ok1(off == 0); ntdb_close(ntdb); ntdb_layout_free(layout); ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/run-56-open-during-transaction.c000066400000000000000000000072021224151530700212310ustar00rootroot00000000000000#include "private.h" #include #include "lock-tracking.h" static ssize_t pwrite_check(int fd, const void *buf, size_t count, off_t offset); static ssize_t write_check(int fd, const void *buf, size_t count); static int ftruncate_check(int fd, off_t length); #define pwrite pwrite_check #define write write_check #define fcntl fcntl_with_lockcheck #define ftruncate ftruncate_check #include "ntdb-source.h" #include "tap-interface.h" #include #include #include #include "external-agent.h" #include "logging.h" static struct agent *agent; static bool opened; static int errors = 0; #define TEST_DBNAME "run-56-open-during-transaction.ntdb" #undef write #undef pwrite #undef fcntl #undef ftruncate static bool is_same(const char *snapshot, const char *latest, off_t len) { unsigned i; for (i = 0; i < len; i++) { if (snapshot[i] != latest[i]) return false; } return true; } static bool compare_file(int fd, const char *snapshot, off_t snapshot_len) { char *contents; bool ret; /* over-length read serves as length check. */ contents = malloc(snapshot_len+1); ret = pread(fd, contents, snapshot_len+1, 0) == snapshot_len && is_same(snapshot, contents, snapshot_len); free(contents); return ret; } static void check_file_intact(int fd) { enum agent_return ret; struct stat st; char *contents; fstat(fd, &st); contents = malloc(st.st_size); if (pread(fd, contents, st.st_size, 0) != st.st_size) { diag("Read fail"); errors++; return; } /* Ask agent to open file. */ ret = external_agent_operation(agent, OPEN, TEST_DBNAME); /* It's OK to open it, but it must not have changed! 
*/ if (!compare_file(fd, contents, st.st_size)) { diag("Agent changed file after opening %s", agent_return_name(ret)); errors++; } if (ret == SUCCESS) { ret = external_agent_operation(agent, CLOSE, NULL); if (ret != SUCCESS) { diag("Agent failed to close ntdb: %s", agent_return_name(ret)); errors++; } } else if (ret != WOULD_HAVE_BLOCKED) { diag("Agent opening file gave %s", agent_return_name(ret)); errors++; } free(contents); } static void after_unlock(int fd) { if (opened) check_file_intact(fd); } static ssize_t pwrite_check(int fd, const void *buf, size_t count, off_t offset) { if (opened) check_file_intact(fd); return pwrite(fd, buf, count, offset); } static ssize_t write_check(int fd, const void *buf, size_t count) { if (opened) check_file_intact(fd); return write(fd, buf, count); } static int ftruncate_check(int fd, off_t length) { if (opened) check_file_intact(fd); return ftruncate(fd, length); } int main(int argc, char *argv[]) { const int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; int i; struct ntdb_context *ntdb; NTDB_DATA key, data; plan_tests(sizeof(flags)/sizeof(flags[0]) * 5); agent = prepare_external_agent(); if (!agent) err(1, "preparing agent"); unlock_callback = after_unlock; for (i = 0; i < sizeof(flags)/sizeof(flags[0]); i++) { diag("Test with %s and %s\n", (flags[i] & NTDB_CONVERT) ? "CONVERT" : "DEFAULT", (flags[i] & NTDB_NOMMAP) ? 
"no mmap" : "mmap"); unlink(TEST_DBNAME); ntdb = ntdb_open(TEST_DBNAME, flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); opened = true; ok1(ntdb_transaction_start(ntdb) == 0); key = ntdb_mkdata("hi", strlen("hi")); data = ntdb_mkdata("world", strlen("world")); ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb_transaction_commit(ntdb) == 0); ok(!errors, "We had %u open errors", errors); opened = false; ntdb_close(ntdb); } return exit_status(); } ntdb-1.0/test/run-57-die-during-transaction.c000066400000000000000000000156661224151530700210470ustar00rootroot00000000000000#include "private.h" #include #include "lock-tracking.h" #include "tap-interface.h" #include #include static ssize_t pwrite_check(int fd, const void *buf, size_t count, off_t offset); static ssize_t write_check(int fd, const void *buf, size_t count); static int ftruncate_check(int fd, off_t length); #define pwrite pwrite_check #define write write_check #define fcntl fcntl_with_lockcheck #define ftruncate ftruncate_check /* There's a malloc inside transaction_setup_recovery, and valgrind complains * when we longjmp and leak it. */ #define MAX_ALLOCATIONS 10 static void *allocated[MAX_ALLOCATIONS]; static unsigned max_alloc = 0; static void *malloc_noleak(size_t len) { unsigned int i; for (i = 0; i < MAX_ALLOCATIONS; i++) if (!allocated[i]) { allocated[i] = malloc(len); if (i > max_alloc) { max_alloc = i; diag("max_alloc: %i", max_alloc); } return allocated[i]; } diag("Too many allocations!"); abort(); } static void *realloc_noleak(void *p, size_t size) { unsigned int i; for (i = 0; i < MAX_ALLOCATIONS; i++) { if (allocated[i] == p) { if (i > max_alloc) { max_alloc = i; diag("max_alloc: %i", max_alloc); } return allocated[i] = realloc(p, size); } } diag("Untracked realloc!"); abort(); } static void free_noleak(void *p) { unsigned int i; /* We don't catch asprintf, so don't complain if we miss one. 
*/ for (i = 0; i < MAX_ALLOCATIONS; i++) { if (allocated[i] == p) { allocated[i] = NULL; break; } } free(p); } static void free_all(void) { unsigned int i; for (i = 0; i < MAX_ALLOCATIONS; i++) { free(allocated[i]); allocated[i] = NULL; } } #define malloc malloc_noleak #define free(x) free_noleak(x) #define realloc realloc_noleak #include "ntdb-source.h" #undef malloc #undef free #undef realloc #undef write #undef pwrite #undef fcntl #undef ftruncate #include #include #include #include #include "external-agent.h" #include "logging.h" static bool in_transaction; static int target, current; static jmp_buf jmpbuf; #define TEST_DBNAME "run-57-die-during-transaction.ntdb" #define KEY_STRING "helloworld" #define DATA_STRING "Helloworld" static void maybe_die(int fd) { if (in_transaction && current++ == target) { longjmp(jmpbuf, 1); } } static ssize_t pwrite_check(int fd, const void *buf, size_t count, off_t offset) { ssize_t ret; maybe_die(fd); ret = pwrite(fd, buf, count, offset); if (ret != count) return ret; maybe_die(fd); return ret; } static ssize_t write_check(int fd, const void *buf, size_t count) { ssize_t ret; maybe_die(fd); ret = write(fd, buf, count); if (ret != count) return ret; maybe_die(fd); return ret; } static int ftruncate_check(int fd, off_t length) { int ret; maybe_die(fd); ret = ftruncate(fd, length); maybe_die(fd); return ret; } static bool test_death(enum operation op, struct agent *agent, bool pre_create_recovery) { struct ntdb_context *ntdb = NULL; NTDB_DATA key, data; enum agent_return ret; int needed_recovery = 0; current = target = 0; /* Big long data to force a change. */ data = ntdb_mkdata(DATA_STRING, strlen(DATA_STRING)); reset: unlink(TEST_DBNAME); ntdb = ntdb_open(TEST_DBNAME, NTDB_NOMMAP|MAYBE_NOSYNC, O_CREAT|O_TRUNC|O_RDWR, 0600, &tap_log_attr); if (!ntdb) { diag("Failed opening NTDB: %s", strerror(errno)); return false; } if (setjmp(jmpbuf) != 0) { /* We're partway through. Simulate our death. 
*/ close(ntdb->file->fd); forget_locking(); in_transaction = false; ret = external_agent_operation(agent, NEEDS_RECOVERY, ""); if (ret == SUCCESS) needed_recovery++; else if (ret != FAILED) { diag("Step %u agent NEEDS_RECOVERY = %s", current, agent_return_name(ret)); return false; } /* Could be key, or data. */ ret = external_agent_operation(agent, op, KEY_STRING "=" KEY_STRING); if (ret != SUCCESS) { ret = external_agent_operation(agent, op, KEY_STRING "=" DATA_STRING); } if (ret != SUCCESS) { diag("Step %u op %s failed = %s", current, operation_name(op), agent_return_name(ret)); return false; } ret = external_agent_operation(agent, NEEDS_RECOVERY, ""); if (ret != FAILED) { diag("Still needs recovery after step %u = %s", current, agent_return_name(ret)); return false; } ret = external_agent_operation(agent, CHECK, ""); if (ret != SUCCESS) { diag("Step %u check failed = %s", current, agent_return_name(ret)); return false; } ret = external_agent_operation(agent, CLOSE, ""); if (ret != SUCCESS) { diag("Step %u close failed = %s", current, agent_return_name(ret)); return false; } /* Suppress logging as this tries to use closed fd. */ suppress_logging = true; suppress_lockcheck = true; ntdb_close(ntdb); suppress_logging = false; suppress_lockcheck = false; target++; current = 0; free_all(); goto reset; } /* Put key for agent to fetch. */ key = ntdb_mkdata(KEY_STRING, strlen(KEY_STRING)); if (pre_create_recovery) { /* Using a transaction now means we allocate the recovery * area immediately. That makes the later transaction smaller * and thus tickles a bug we had. */ if (ntdb_transaction_start(ntdb) != 0) return false; } if (ntdb_store(ntdb, key, key, NTDB_INSERT) != 0) return false; if (pre_create_recovery) { if (ntdb_transaction_commit(ntdb) != 0) return false; } /* This is the key we insert in transaction. 
*/ key.dsize--; ret = external_agent_operation(agent, OPEN, TEST_DBNAME); if (ret != SUCCESS) errx(1, "Agent failed to open: %s", agent_return_name(ret)); ret = external_agent_operation(agent, FETCH, KEY_STRING "=" KEY_STRING); if (ret != SUCCESS) errx(1, "Agent failed find key: %s", agent_return_name(ret)); in_transaction = true; if (ntdb_transaction_start(ntdb) != 0) return false; if (ntdb_store(ntdb, key, data, NTDB_INSERT) != 0) return false; if (ntdb_transaction_commit(ntdb) != 0) return false; in_transaction = false; /* We made it! */ diag("Completed %u runs", current); ntdb_close(ntdb); ret = external_agent_operation(agent, CLOSE, ""); if (ret != SUCCESS) { diag("Step %u close failed = %s", current, agent_return_name(ret)); return false; } ok1(needed_recovery); ok1(locking_errors == 0); ok1(forget_locking() == 0); locking_errors = 0; return true; } int main(int argc, char *argv[]) { enum operation ops[] = { FETCH, STORE, TRANSACTION_START }; struct agent *agent; int i, j; plan_tests(24); unlock_callback = maybe_die; external_agent_free = free_noleak; agent = prepare_external_agent(); if (!agent) err(1, "preparing agent"); for (j = 0; j < 2; j++) { for (i = 0; i < sizeof(ops)/sizeof(ops[0]); i++) { diag("Testing %s after death (%s recovery area)", operation_name(ops[i]), j ? 
"with" : "without"); ok1(test_death(ops[i], agent, j)); } } free_external_agent(agent); return exit_status(); } ntdb-1.0/test/run-64-bit-tdb.c000066400000000000000000000046411224151530700160070ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" #include "logging.h" /* The largest 32-bit value which is still a multiple of NTDB_PGSIZE */ #define ALMOST_4G ((uint32_t)-NTDB_PGSIZE) /* And this pushes it over 32 bits */ #define A_LITTLE_BIT (NTDB_PGSIZE * 2) int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; if (sizeof(off_t) <= 4) { plan_tests(1); pass("No 64 bit off_t"); return exit_status(); } plan_tests(sizeof(flags) / sizeof(flags[0]) * 16); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { off_t old_size; NTDB_DATA k, d; struct hash_info h; struct ntdb_used_record rec; ntdb_off_t off; ntdb = ntdb_open("run-64-bit-ntdb.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; old_size = ntdb->file->map_size; /* Add a fake record to chew up the existing free space. */ k = ntdb_mkdata("fake", 4); d.dsize = ntdb->file->map_size - NEW_DATABASE_HDR_SIZE(ntdb->hash_bits) - 8; d.dptr = malloc(d.dsize); memset(d.dptr, 0, d.dsize); ok1(ntdb_store(ntdb, k, d, NTDB_INSERT) == 0); ok1(ntdb->file->map_size == old_size); free(d.dptr); /* This makes a sparse file */ ok1(ftruncate(ntdb->file->fd, ALMOST_4G) == 0); ok1(add_free_record(ntdb, old_size, ALMOST_4G - old_size, NTDB_LOCK_WAIT, false) == NTDB_SUCCESS); /* Now add a little record past the 4G barrier. */ ok1(ntdb_expand_file(ntdb, A_LITTLE_BIT) == NTDB_SUCCESS); ok1(add_free_record(ntdb, ALMOST_4G, A_LITTLE_BIT, NTDB_LOCK_WAIT, false) == NTDB_SUCCESS); ok1(ntdb_check(ntdb, NULL, NULL) == NTDB_SUCCESS); /* Test allocation path. 
*/ k = ntdb_mkdata("key", 4); d = ntdb_mkdata("data", 5); ok1(ntdb_store(ntdb, k, d, NTDB_INSERT) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == NTDB_SUCCESS); /* Make sure it put it at end as we expected. */ off = find_and_lock(ntdb, k, F_RDLCK, &h, &rec, NULL); ok1(off >= ALMOST_4G); ntdb_unlock_hash(ntdb, h.h, F_RDLCK); ok1(ntdb_fetch(ntdb, k, &d) == 0); ok1(d.dsize == 5); ok1(strcmp((char *)d.dptr, "data") == 0); free(d.dptr); ok1(ntdb_delete(ntdb, k) == 0); ok1(ntdb_check(ntdb, NULL, NULL) == NTDB_SUCCESS); ntdb_close(ntdb); } /* We might get messages about mmap failing, so don't test * tap_log_messages */ return exit_status(); } ntdb-1.0/test/run-90-get-set-attributes.c000066400000000000000000000114021224151530700202060ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" #include "logging.h" static int mylock(int fd, int rw, off_t off, off_t len, bool waitflag, void *unused) { return 0; } static int myunlock(int fd, int rw, off_t off, off_t len, void *unused) { return 0; } static uint32_t hash_fn(const void *key, size_t len, uint32_t seed, void *priv) { return 0; } int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; union ntdb_attribute seed_attr; union ntdb_attribute hash_attr; union ntdb_attribute lock_attr; seed_attr.base.attr = NTDB_ATTRIBUTE_SEED; seed_attr.base.next = &hash_attr; seed_attr.seed.seed = 100; hash_attr.base.attr = NTDB_ATTRIBUTE_HASH; hash_attr.base.next = &lock_attr; hash_attr.hash.fn = hash_fn; hash_attr.hash.data = &hash_attr; lock_attr.base.attr = NTDB_ATTRIBUTE_FLOCK; lock_attr.base.next = &tap_log_attr; lock_attr.flock.lock = mylock; lock_attr.flock.unlock = myunlock; lock_attr.flock.data = &lock_attr; plan_tests(sizeof(flags) / sizeof(flags[0]) * 50); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { union ntdb_attribute attr; /* First open with no attributes. 
*/ ntdb = ntdb_open("run-90-get-set-attributes.ntdb", flags[i] |MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, NULL); ok1(ntdb); /* Get log on no attributes will fail */ attr.base.attr = NTDB_ATTRIBUTE_LOG; ok1(ntdb_get_attribute(ntdb, &attr) == NTDB_ERR_NOEXIST); /* These always work. */ attr.base.attr = NTDB_ATTRIBUTE_HASH; ok1(ntdb_get_attribute(ntdb, &attr) == 0); ok1(attr.base.attr == NTDB_ATTRIBUTE_HASH); ok1(attr.hash.fn == ntdb_jenkins_hash); attr.base.attr = NTDB_ATTRIBUTE_FLOCK; ok1(ntdb_get_attribute(ntdb, &attr) == 0); ok1(attr.base.attr == NTDB_ATTRIBUTE_FLOCK); ok1(attr.flock.lock == ntdb_fcntl_lock); ok1(attr.flock.unlock == ntdb_fcntl_unlock); attr.base.attr = NTDB_ATTRIBUTE_SEED; ok1(ntdb_get_attribute(ntdb, &attr) == 0); ok1(attr.base.attr == NTDB_ATTRIBUTE_SEED); /* This is possible, just astronomically unlikely. */ ok1(attr.seed.seed != 0); /* Unset attributes. */ ntdb_unset_attribute(ntdb, NTDB_ATTRIBUTE_LOG); ntdb_unset_attribute(ntdb, NTDB_ATTRIBUTE_FLOCK); /* Set them. */ ok1(ntdb_set_attribute(ntdb, &tap_log_attr) == 0); ok1(ntdb_set_attribute(ntdb, &lock_attr) == 0); /* These should fail. */ ok1(ntdb_set_attribute(ntdb, &seed_attr) == NTDB_ERR_EINVAL); ok1(tap_log_messages == 1); ok1(ntdb_set_attribute(ntdb, &hash_attr) == NTDB_ERR_EINVAL); ok1(tap_log_messages == 2); tap_log_messages = 0; /* Getting them should work as expected. */ attr.base.attr = NTDB_ATTRIBUTE_LOG; ok1(ntdb_get_attribute(ntdb, &attr) == 0); ok1(attr.base.attr == NTDB_ATTRIBUTE_LOG); ok1(attr.log.fn == tap_log_attr.log.fn); ok1(attr.log.data == tap_log_attr.log.data); attr.base.attr = NTDB_ATTRIBUTE_FLOCK; ok1(ntdb_get_attribute(ntdb, &attr) == 0); ok1(attr.base.attr == NTDB_ATTRIBUTE_FLOCK); ok1(attr.flock.lock == mylock); ok1(attr.flock.unlock == myunlock); ok1(attr.flock.data == &lock_attr); /* Unset them again. 
*/ ntdb_unset_attribute(ntdb, NTDB_ATTRIBUTE_FLOCK); ok1(tap_log_messages == 0); ntdb_unset_attribute(ntdb, NTDB_ATTRIBUTE_LOG); ok1(tap_log_messages == 0); ntdb_close(ntdb); ok1(tap_log_messages == 0); /* Now open with all attributes. */ ntdb = ntdb_open("run-90-get-set-attributes.ntdb", flags[i] | MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &seed_attr); ok1(ntdb); /* Get will succeed */ attr.base.attr = NTDB_ATTRIBUTE_LOG; ok1(ntdb_get_attribute(ntdb, &attr) == 0); ok1(attr.base.attr == NTDB_ATTRIBUTE_LOG); ok1(attr.log.fn == tap_log_attr.log.fn); ok1(attr.log.data == tap_log_attr.log.data); attr.base.attr = NTDB_ATTRIBUTE_HASH; ok1(ntdb_get_attribute(ntdb, &attr) == 0); ok1(attr.base.attr == NTDB_ATTRIBUTE_HASH); ok1(attr.hash.fn == hash_fn); ok1(attr.hash.data == &hash_attr); attr.base.attr = NTDB_ATTRIBUTE_FLOCK; ok1(ntdb_get_attribute(ntdb, &attr) == 0); ok1(attr.base.attr == NTDB_ATTRIBUTE_FLOCK); ok1(attr.flock.lock == mylock); ok1(attr.flock.unlock == myunlock); ok1(attr.flock.data == &lock_attr); attr.base.attr = NTDB_ATTRIBUTE_SEED; ok1(ntdb_get_attribute(ntdb, &attr) == 0); ok1(attr.base.attr == NTDB_ATTRIBUTE_SEED); ok1(attr.seed.seed == seed_attr.seed.seed); /* Unset attributes. 
*/ ntdb_unset_attribute(ntdb, NTDB_ATTRIBUTE_HASH); ok1(tap_log_messages == 1); ntdb_unset_attribute(ntdb, NTDB_ATTRIBUTE_SEED); ok1(tap_log_messages == 2); ntdb_unset_attribute(ntdb, NTDB_ATTRIBUTE_FLOCK); ntdb_unset_attribute(ntdb, NTDB_ATTRIBUTE_LOG); ok1(tap_log_messages == 2); tap_log_messages = 0; ntdb_close(ntdb); } return exit_status(); } ntdb-1.0/test/run-capabilities.c000066400000000000000000000173751224151530700166740ustar00rootroot00000000000000#include #include "ntdb-source.h" #include "tap-interface.h" #include "logging.h" #include "layout.h" #include "failtest_helper.h" #include static size_t len_of(bool breaks_check, bool breaks_write, bool breaks_open) { size_t len = 0; if (breaks_check) len += 8; if (breaks_write) len += 16; if (breaks_open) len += 32; return len; } /* Creates a NTDB with various capabilities. */ static void create_ntdb(const char *name, unsigned int cap, bool breaks_check, bool breaks_write, bool breaks_open, ...) { NTDB_DATA key, data; va_list ap; struct ntdb_layout *layout; struct ntdb_context *ntdb; int fd, clen; union ntdb_attribute seed_attr; /* Force a seed which doesn't allow records to clash! 
*/ seed_attr.base.attr = NTDB_ATTRIBUTE_SEED; seed_attr.base.next = &tap_log_attr; seed_attr.seed.seed = 0; key = ntdb_mkdata("Hello", 5); data = ntdb_mkdata("world", 5); /* Create a NTDB with some data, and some capabilities */ layout = new_ntdb_layout(); ntdb_layout_add_freetable(layout); ntdb_layout_add_used(layout, key, data, 6); clen = len_of(breaks_check, breaks_write, breaks_open); ntdb_layout_add_free(layout, 15496 - clen, 0); ntdb_layout_add_capability(layout, cap, breaks_write, breaks_check, breaks_open, clen); va_start(ap, breaks_open); while ((cap = va_arg(ap, int)) != 0) { breaks_check = va_arg(ap, int); breaks_write = va_arg(ap, int); breaks_open = va_arg(ap, int); key.dsize--; ntdb_layout_add_used(layout, key, data, 11 - key.dsize); clen = len_of(breaks_check, breaks_write, breaks_open); ntdb_layout_add_free(layout, 16304 - clen, 0); ntdb_layout_add_capability(layout, cap, breaks_write, breaks_check, breaks_open, clen); } va_end(ap); /* We open-code this, because we need to use the failtest write. */ ntdb = ntdb_layout_get(layout, failtest_free, &seed_attr); fd = open(name, O_RDWR|O_TRUNC|O_CREAT, 0600); if (fd < 0) err(1, "opening %s for writing", name); if (write(fd, ntdb->file->map_ptr, ntdb->file->map_size) != ntdb->file->map_size) err(1, "writing %s", name); close(fd); ntdb_close(ntdb); ntdb_layout_free(layout); } /* Note all the "goto out" early exits: they're to shorten failtest time. */ int main(int argc, char *argv[]) { struct ntdb_context *ntdb; char *summary; failtest_init(argc, argv); failtest_hook = block_repeat_failures; failtest_exit_check = exit_check_log; plan_tests(60); failtest_suppress = true; /* Capability says you can ignore it? 
*/ create_ntdb("run-capabilities.ntdb", 1, false, false, false, 0); failtest_suppress = false; ntdb = ntdb_open("run-capabilities.ntdb", MAYBE_NOSYNC, O_RDWR, 0, &tap_log_attr); failtest_suppress = true; if (!ok1(ntdb)) goto out; ok1(tap_log_messages == 0); ok1(ntdb_check(ntdb, NULL, NULL) == NTDB_SUCCESS); ok1(tap_log_messages == 0); ntdb_close(ntdb); /* Two capabilitues say you can ignore them? */ create_ntdb("run-capabilities.ntdb", 1, false, false, false, 2, false, false, false, 0); failtest_suppress = false; ntdb = ntdb_open("run-capabilities.ntdb", MAYBE_NOSYNC, O_RDWR, 0, &tap_log_attr); failtest_suppress = true; if (!ok1(ntdb)) goto out; ok1(tap_log_messages == 0); ok1(ntdb_check(ntdb, NULL, NULL) == NTDB_SUCCESS); ok1(tap_log_messages == 0); ok1(ntdb_summary(ntdb, 0, &summary) == NTDB_SUCCESS); ok1(strstr(summary, "Capability 1\n")); free(summary); ntdb_close(ntdb); /* Capability says you can't check. */ create_ntdb("run-capabilities.ntdb", 1, false, false, false, 2, true, false, false, 0); failtest_suppress = false; ntdb = ntdb_open("run-capabilities.ntdb", MAYBE_NOSYNC, O_RDWR, 0, &tap_log_attr); failtest_suppress = true; if (!ok1(ntdb)) goto out; ok1(tap_log_messages == 0); ok1(ntdb_get_flags(ntdb) & NTDB_CANT_CHECK); ok1(ntdb_check(ntdb, NULL, NULL) == NTDB_SUCCESS); /* We expect a warning! */ ok1(tap_log_messages == 1); ok1(strstr(log_last, "capabilit")); ok1(ntdb_summary(ntdb, 0, &summary) == NTDB_SUCCESS); ok1(strstr(summary, "Capability 1\n")); ok1(strstr(summary, "Capability 2 (uncheckable)\n")); free(summary); ntdb_close(ntdb); /* Capability says you can't write. */ create_ntdb("run-capabilities.ntdb", 1, false, false, false, 2, false, true, false, 0); failtest_suppress = false; ntdb = ntdb_open("run-capabilities.ntdb", MAYBE_NOSYNC, O_RDWR, 0, &tap_log_attr); failtest_suppress = true; /* We expect a message. 
*/ ok1(!ntdb); if (!ok1(tap_log_messages == 2)) goto out; if (!ok1(strstr(log_last, "unknown"))) goto out; ok1(strstr(log_last, "write")); /* We can open it read-only though! */ failtest_suppress = false; ntdb = ntdb_open("run-capabilities.ntdb", MAYBE_NOSYNC, O_RDONLY, 0, &tap_log_attr); failtest_suppress = true; if (!ok1(ntdb)) goto out; ok1(tap_log_messages == 2); ok1(ntdb_check(ntdb, NULL, NULL) == NTDB_SUCCESS); ok1(tap_log_messages == 2); ok1(ntdb_summary(ntdb, 0, &summary) == NTDB_SUCCESS); ok1(strstr(summary, "Capability 1\n")); ok1(strstr(summary, "Capability 2 (read-only)\n")); free(summary); ntdb_close(ntdb); /* Capability says you can't open. */ create_ntdb("run-capabilities.ntdb", 1, false, false, false, 2, false, false, true, 0); failtest_suppress = false; ntdb = ntdb_open("run-capabilities.ntdb", MAYBE_NOSYNC, O_RDWR, 0, &tap_log_attr); failtest_suppress = true; /* We expect a message. */ ok1(!ntdb); if (!ok1(tap_log_messages == 3)) goto out; if (!ok1(strstr(log_last, "unknown"))) goto out; /* Combine capabilities correctly. */ create_ntdb("run-capabilities.ntdb", 1, false, false, false, 2, true, false, false, 3, false, true, false, 0); failtest_suppress = false; ntdb = ntdb_open("run-capabilities.ntdb", MAYBE_NOSYNC, O_RDWR, 0, &tap_log_attr); failtest_suppress = true; /* We expect a message. */ ok1(!ntdb); if (!ok1(tap_log_messages == 4)) goto out; if (!ok1(strstr(log_last, "unknown"))) goto out; ok1(strstr(log_last, "write")); /* We can open it read-only though! */ failtest_suppress = false; ntdb = ntdb_open("run-capabilities.ntdb", MAYBE_NOSYNC, O_RDONLY, 0, &tap_log_attr); failtest_suppress = true; if (!ok1(ntdb)) goto out; ok1(tap_log_messages == 4); ok1(ntdb_get_flags(ntdb) & NTDB_CANT_CHECK); ok1(ntdb_check(ntdb, NULL, NULL) == NTDB_SUCCESS); /* We expect a warning! 
*/ ok1(tap_log_messages == 5); ok1(strstr(log_last, "unknown")); ok1(ntdb_summary(ntdb, 0, &summary) == NTDB_SUCCESS); ok1(strstr(summary, "Capability 1\n")); ok1(strstr(summary, "Capability 2 (uncheckable)\n")); ok1(strstr(summary, "Capability 3 (read-only)\n")); free(summary); ntdb_close(ntdb); /* Two capability flags in one. */ create_ntdb("run-capabilities.ntdb", 1, false, false, false, 2, true, true, false, 0); failtest_suppress = false; ntdb = ntdb_open("run-capabilities.ntdb", MAYBE_NOSYNC, O_RDWR, 0, &tap_log_attr); failtest_suppress = true; /* We expect a message. */ ok1(!ntdb); if (!ok1(tap_log_messages == 6)) goto out; if (!ok1(strstr(log_last, "unknown"))) goto out; ok1(strstr(log_last, "write")); /* We can open it read-only though! */ failtest_suppress = false; ntdb = ntdb_open("run-capabilities.ntdb", MAYBE_NOSYNC, O_RDONLY, 0, &tap_log_attr); failtest_suppress = true; if (!ok1(ntdb)) goto out; ok1(tap_log_messages == 6); ok1(ntdb_get_flags(ntdb) & NTDB_CANT_CHECK); ok1(ntdb_check(ntdb, NULL, NULL) == NTDB_SUCCESS); /* We expect a warning! 
*/ ok1(tap_log_messages == 7); ok1(strstr(log_last, "unknown")); ok1(ntdb_summary(ntdb, 0, &summary) == NTDB_SUCCESS); ok1(strstr(summary, "Capability 1\n")); ok1(strstr(summary, "Capability 2 (uncheckable,read-only)\n")); free(summary); ntdb_close(ntdb); out: failtest_exit(exit_status()); } ntdb-1.0/test/run-expand-in-transaction.c000066400000000000000000000024751224151530700204440ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" #include "logging.h" int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; NTDB_DATA key = ntdb_mkdata("key", 3); NTDB_DATA data = ntdb_mkdata("data", 4); plan_tests(sizeof(flags) / sizeof(flags[0]) * 9 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { size_t size; NTDB_DATA k, d; ntdb = ntdb_open("run-expand-in-transaction.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; size = ntdb->file->map_size; /* Add a fake record to chew up the existing free space. 
*/ k = ntdb_mkdata("fake", 4); d.dsize = ntdb->file->map_size - NEW_DATABASE_HDR_SIZE(ntdb->hash_bits) - 8; d.dptr = malloc(d.dsize); memset(d.dptr, 0, d.dsize); ok1(ntdb_store(ntdb, k, d, NTDB_INSERT) == 0); ok1(ntdb->file->map_size == size); free(d.dptr); ok1(ntdb_transaction_start(ntdb) == 0); ok1(ntdb_store(ntdb, key, data, NTDB_INSERT) == 0); ok1(ntdb->file->map_size > size); ok1(ntdb_transaction_commit(ntdb) == 0); ok1(ntdb->file->map_size > size); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/run-features.c000066400000000000000000000034551224151530700160530ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" #include "logging.h" int main(int argc, char *argv[]) { unsigned int i, j; struct ntdb_context *ntdb; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; NTDB_DATA key = { (unsigned char *)&j, sizeof(j) }; NTDB_DATA data = { (unsigned char *)&j, sizeof(j) }; plan_tests(sizeof(flags) / sizeof(flags[0]) * 8 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { uint64_t features; ntdb = ntdb_open("run-features.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; /* Put some stuff in there. */ for (j = 0; j < 100; j++) { if (ntdb_store(ntdb, key, data, NTDB_REPLACE) != 0) fail("Storing in ntdb"); } /* Mess with features fields in hdr. */ features = (~NTDB_FEATURE_MASK ^ 1); ok1(ntdb_write_convert(ntdb, offsetof(struct ntdb_header, features_used), &features, sizeof(features)) == 0); ok1(ntdb_write_convert(ntdb, offsetof(struct ntdb_header, features_offered), &features, sizeof(features)) == 0); ntdb_close(ntdb); ntdb = ntdb_open("run-features.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR, 0, &tap_log_attr); ok1(ntdb); if (!ntdb) continue; /* Should not have changed features offered. 
*/ ok1(ntdb_read_convert(ntdb, offsetof(struct ntdb_header, features_offered), &features, sizeof(features)) == 0); ok1(features == (~NTDB_FEATURE_MASK ^ 1)); /* Should have cleared unknown bits in features_used. */ ok1(ntdb_read_convert(ntdb, offsetof(struct ntdb_header, features_used), &features, sizeof(features)) == 0); ok1(features == (1 & NTDB_FEATURE_MASK)); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/run-lockall.c000066400000000000000000000036651224151530700156610ustar00rootroot00000000000000#include "private.h" #include #include "lock-tracking.h" #define fcntl fcntl_with_lockcheck #include "ntdb-source.h" #include "tap-interface.h" #include #include #include #include "external-agent.h" #include "logging.h" #define TEST_DBNAME "run-lockall.ntdb" #define KEY_STR "key" #undef fcntl int main(int argc, char *argv[]) { struct agent *agent; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; int i; plan_tests(13 * sizeof(flags)/sizeof(flags[0]) + 1); agent = prepare_external_agent(); if (!agent) err(1, "preparing agent"); for (i = 0; i < sizeof(flags)/sizeof(flags[0]); i++) { enum agent_return ret; struct ntdb_context *ntdb; ntdb = ntdb_open(TEST_DBNAME, flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(ntdb); ret = external_agent_operation(agent, OPEN, TEST_DBNAME); ok1(ret == SUCCESS); ok1(ntdb_lockall(ntdb) == NTDB_SUCCESS); ok1(external_agent_operation(agent, STORE, KEY_STR "=" KEY_STR) == WOULD_HAVE_BLOCKED); ok1(external_agent_operation(agent, FETCH, KEY_STR "=" KEY_STR) == WOULD_HAVE_BLOCKED); /* Test nesting. 
*/ ok1(ntdb_lockall(ntdb) == NTDB_SUCCESS); ntdb_unlockall(ntdb); ntdb_unlockall(ntdb); ok1(external_agent_operation(agent, STORE, KEY_STR "=" KEY_STR) == SUCCESS); ok1(ntdb_lockall_read(ntdb) == NTDB_SUCCESS); ok1(external_agent_operation(agent, STORE, KEY_STR "=" KEY_STR) == WOULD_HAVE_BLOCKED); ok1(external_agent_operation(agent, FETCH, KEY_STR "=" KEY_STR) == SUCCESS); ok1(ntdb_lockall_read(ntdb) == NTDB_SUCCESS); ntdb_unlockall_read(ntdb); ntdb_unlockall_read(ntdb); ok1(external_agent_operation(agent, STORE, KEY_STR "=" KEY_STR) == SUCCESS); ok1(external_agent_operation(agent, CLOSE, NULL) == SUCCESS); ntdb_close(ntdb); } free_external_agent(agent); ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/run-remap-in-read_traverse.c000066400000000000000000000026421224151530700205660ustar00rootroot00000000000000#include "ntdb-source.h" /* We had a bug where we marked the ntdb read-only for a ntdb_traverse_read. * If we then expanded the ntdb, we would remap read-only, and later SEGV. 
*/ #include "tap-interface.h" #include "external-agent.h" #include "logging.h" static bool file_larger(int fd, ntdb_len_t size) { struct stat st; fstat(fd, &st); return st.st_size != size; } static unsigned add_records_to_grow(struct agent *agent, int fd, ntdb_len_t size) { unsigned int i; for (i = 0; !file_larger(fd, size); i++) { char data[50]; sprintf(data, "%i=%i", i, i); if (external_agent_operation(agent, STORE, data) != SUCCESS) return 0; } diag("Added %u records to grow file", i); return i; } int main(int argc, char *argv[]) { unsigned int i; struct agent *agent; struct ntdb_context *ntdb; NTDB_DATA d = ntdb_mkdata("hello", 5); const char filename[] = "run-remap-in-read_traverse.ntdb"; plan_tests(4); agent = prepare_external_agent(); ntdb = ntdb_open(filename, MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ok1(external_agent_operation(agent, OPEN, filename) == SUCCESS); i = add_records_to_grow(agent, ntdb->file->fd, ntdb->file->map_size); /* Do a traverse. */ ok1(ntdb_traverse(ntdb, NULL, NULL) == i); /* Now store something! */ ok1(ntdb_store(ntdb, d, d, NTDB_INSERT) == 0); ok1(tap_log_messages == 0); ntdb_close(ntdb); free_external_agent(agent); return exit_status(); } ntdb-1.0/test/run-seed.c000066400000000000000000000027421224151530700151530ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" #include "logging.h" static int log_count = 0; /* Normally we get a log when setting random seed. 
*/ static void my_log_fn(struct ntdb_context *ntdb, enum ntdb_log_level level, enum NTDB_ERROR ecode, const char *message, void *priv) { log_count++; } static union ntdb_attribute log_attr = { .log = { .base = { .attr = NTDB_ATTRIBUTE_LOG }, .fn = my_log_fn } }; int main(int argc, char *argv[]) { unsigned int i; struct ntdb_context *ntdb; union ntdb_attribute attr; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; attr.seed.base.attr = NTDB_ATTRIBUTE_SEED; attr.seed.base.next = &log_attr; attr.seed.seed = 42; plan_tests(sizeof(flags) / sizeof(flags[0]) * 4 + 4 * 3); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { struct ntdb_header hdr; int fd; ntdb = ntdb_open("run-seed.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &attr); ok1(ntdb); if (!ntdb) continue; ok1(ntdb_check(ntdb, NULL, NULL) == 0); ok1(ntdb->hash_seed == 42); ok1(log_count == 0); ntdb_close(ntdb); if (flags[i] & NTDB_INTERNAL) continue; fd = open("run-seed.ntdb", O_RDONLY); ok1(fd >= 0); ok1(read(fd, &hdr, sizeof(hdr)) == sizeof(hdr)); if (flags[i] & NTDB_CONVERT) ok1(bswap_64(hdr.hash_seed) == 42); else ok1(hdr.hash_seed == 42); close(fd); } return exit_status(); } ntdb-1.0/test/run-tdb_errorstr.c000066400000000000000000000021741224151530700167450ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" int main(int argc, char *argv[]) { enum NTDB_ERROR e; plan_tests(NTDB_ERR_RDONLY*-1 + 2); for (e = NTDB_SUCCESS; e >= NTDB_ERR_RDONLY; e--) { switch (e) { case NTDB_SUCCESS: ok1(!strcmp(ntdb_errorstr(e), "Success")); break; case NTDB_ERR_IO: ok1(!strcmp(ntdb_errorstr(e), "IO Error")); break; case NTDB_ERR_LOCK: ok1(!strcmp(ntdb_errorstr(e), "Locking error")); break; case NTDB_ERR_OOM: ok1(!strcmp(ntdb_errorstr(e), "Out of memory")); break; case NTDB_ERR_EXISTS: ok1(!strcmp(ntdb_errorstr(e), "Record exists")); break; case NTDB_ERR_EINVAL: ok1(!strcmp(ntdb_errorstr(e), "Invalid 
parameter")); break; case NTDB_ERR_NOEXIST: ok1(!strcmp(ntdb_errorstr(e), "Record does not exist")); break; case NTDB_ERR_RDONLY: ok1(!strcmp(ntdb_errorstr(e), "write not permitted")); break; case NTDB_ERR_CORRUPT: ok1(!strcmp(ntdb_errorstr(e), "Corrupt database")); break; } } ok1(!strcmp(ntdb_errorstr(e), "Invalid error code")); return exit_status(); } ntdb-1.0/test/run-tdb_foreach.c000066400000000000000000000044231224151530700164710ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" #include "logging.h" static int drop_count(struct ntdb_context *ntdb, unsigned int *count) { if (--(*count) == 0) return 1; return 0; } static int set_found(struct ntdb_context *ntdb, bool found[3]) { unsigned int idx; if (strcmp(ntdb_name(ntdb), "run-ntdb_foreach0.ntdb") == 0) idx = 0; else if (strcmp(ntdb_name(ntdb), "run-ntdb_foreach1.ntdb") == 0) idx = 1; else if (strcmp(ntdb_name(ntdb), "run-ntdb_foreach2.ntdb") == 0) idx = 2; else abort(); if (found[idx]) abort(); found[idx] = true; return 0; } int main(int argc, char *argv[]) { unsigned int i, count; bool found[3]; struct ntdb_context *ntdb0, *ntdb1, *ntdb; int flags[] = { NTDB_DEFAULT, NTDB_NOMMAP, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; plan_tests(sizeof(flags) / sizeof(flags[0]) * 8); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb0 = ntdb_open("run-ntdb_foreach0.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ntdb1 = ntdb_open("run-ntdb_foreach1.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); ntdb = ntdb_open("run-ntdb_foreach2.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr); memset(found, 0, sizeof(found)); ntdb_foreach(set_found, found); ok1(found[0] && found[1] && found[2]); /* Test premature iteration termination */ count = 1; ntdb_foreach(drop_count, &count); ok1(count == 0); ntdb_close(ntdb1); memset(found, 0, sizeof(found)); ntdb_foreach(set_found, found); ok1(found[0] && !found[1] && 
found[2]); ntdb_close(ntdb); memset(found, 0, sizeof(found)); ntdb_foreach(set_found, found); ok1(found[0] && !found[1] && !found[2]); ntdb1 = ntdb_open("run-ntdb_foreach1.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR, 0600, &tap_log_attr); memset(found, 0, sizeof(found)); ntdb_foreach(set_found, found); ok1(found[0] && found[1] && !found[2]); ntdb_close(ntdb0); memset(found, 0, sizeof(found)); ntdb_foreach(set_found, found); ok1(!found[0] && found[1] && !found[2]); ntdb_close(ntdb1); memset(found, 0, sizeof(found)); ntdb_foreach(set_found, found); ok1(!found[0] && !found[1] && !found[2]); ok1(tap_log_messages == 0); } return exit_status(); } ntdb-1.0/test/run-traverse.c000066400000000000000000000113651224151530700160670ustar00rootroot00000000000000#include "ntdb-source.h" #include "tap-interface.h" #include "logging.h" #define NUM_RECORDS 1000 /* We use the same seed which we saw a failure on. */ static uint32_t fixedhash(const void *key, size_t len, uint32_t seed, void *p) { return hash64_stable((const unsigned char *)key, len, *(uint64_t *)p); } static bool store_records(struct ntdb_context *ntdb) { int i; NTDB_DATA key = { (unsigned char *)&i, sizeof(i) }; NTDB_DATA data = { (unsigned char *)&i, sizeof(i) }; for (i = 0; i < NUM_RECORDS; i++) if (ntdb_store(ntdb, key, data, NTDB_REPLACE) != 0) return false; return true; } struct trav_data { unsigned int calls, call_limit; int low, high; bool mismatch; bool delete; enum NTDB_ERROR delete_error; }; static int trav(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA dbuf, struct trav_data *td) { int val; td->calls++; if (key.dsize != sizeof(val) || dbuf.dsize != sizeof(val) || memcmp(key.dptr, dbuf.dptr, key.dsize) != 0) { td->mismatch = true; return -1; } memcpy(&val, dbuf.dptr, dbuf.dsize); if (val < td->low) td->low = val; if (val > td->high) td->high = val; if (td->delete) { td->delete_error = ntdb_delete(ntdb, key); if (td->delete_error != NTDB_SUCCESS) { return -1; } } if (td->calls == td->call_limit) return 1; return 
0; } struct trav_grow_data { unsigned int calls; unsigned int num_large; bool mismatch; enum NTDB_ERROR error; }; static int trav_grow(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA dbuf, struct trav_grow_data *tgd) { int val; unsigned char buffer[128] = { 0 }; tgd->calls++; if (key.dsize != sizeof(val) || dbuf.dsize < sizeof(val) || memcmp(key.dptr, dbuf.dptr, key.dsize) != 0) { tgd->mismatch = true; return -1; } if (dbuf.dsize > sizeof(val)) /* We must have seen this before! */ tgd->num_large++; /* Make a big difference to the database. */ dbuf.dptr = buffer; dbuf.dsize = sizeof(buffer); tgd->error = ntdb_append(ntdb, key, dbuf); if (tgd->error != NTDB_SUCCESS) { return -1; } return 0; } int main(int argc, char *argv[]) { unsigned int i; int num; struct trav_data td; struct trav_grow_data tgd; struct ntdb_context *ntdb; uint64_t seed = 16014841315512641303ULL; int flags[] = { NTDB_INTERNAL, NTDB_DEFAULT, NTDB_NOMMAP, NTDB_INTERNAL|NTDB_CONVERT, NTDB_CONVERT, NTDB_NOMMAP|NTDB_CONVERT }; union ntdb_attribute hattr = { .hash = { .base = { NTDB_ATTRIBUTE_HASH }, .fn = fixedhash, .data = &seed } }; hattr.base.next = &tap_log_attr; plan_tests(sizeof(flags) / sizeof(flags[0]) * 32 + 1); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ntdb = ntdb_open("run-traverse.ntdb", flags[i]|MAYBE_NOSYNC, O_RDWR|O_CREAT|O_TRUNC, 0600, &hattr); ok1(ntdb); if (!ntdb) continue; ok1(ntdb_traverse(ntdb, NULL, NULL) == 0); ok1(store_records(ntdb)); num = ntdb_traverse(ntdb, NULL, NULL); ok1(num == NUM_RECORDS); /* Full traverse. */ td.calls = 0; td.call_limit = UINT_MAX; td.low = INT_MAX; td.high = INT_MIN; td.mismatch = false; td.delete = false; num = ntdb_traverse(ntdb, trav, &td); ok1(num == NUM_RECORDS); ok1(!td.mismatch); ok1(td.calls == NUM_RECORDS); ok1(td.low == 0); ok1(td.high == NUM_RECORDS-1); /* Short traverse. 
*/ td.calls = 0; td.call_limit = NUM_RECORDS / 2; td.low = INT_MAX; td.high = INT_MIN; td.mismatch = false; td.delete = false; num = ntdb_traverse(ntdb, trav, &td); ok1(num == NUM_RECORDS / 2); ok1(!td.mismatch); ok1(td.calls == NUM_RECORDS / 2); ok1(td.low <= NUM_RECORDS / 2); ok1(td.high > NUM_RECORDS / 2); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ok1(tap_log_messages == 0); /* Deleting traverse (delete everything). */ td.calls = 0; td.call_limit = UINT_MAX; td.low = INT_MAX; td.high = INT_MIN; td.mismatch = false; td.delete = true; td.delete_error = NTDB_SUCCESS; num = ntdb_traverse(ntdb, trav, &td); ok1(num == NUM_RECORDS); ok1(td.delete_error == NTDB_SUCCESS); ok1(!td.mismatch); ok1(td.calls == NUM_RECORDS); ok1(td.low == 0); ok1(td.high == NUM_RECORDS - 1); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Now it's empty! */ ok1(ntdb_traverse(ntdb, NULL, NULL) == 0); /* Re-add. */ ok1(store_records(ntdb)); ok1(ntdb_traverse(ntdb, NULL, NULL) == NUM_RECORDS); ok1(ntdb_check(ntdb, NULL, NULL) == 0); /* Grow. This will cause us to be reshuffled. */ tgd.calls = 0; tgd.num_large = 0; tgd.mismatch = false; tgd.error = NTDB_SUCCESS; ok1(ntdb_traverse(ntdb, trav_grow, &tgd) > 1); ok1(tgd.error == 0); ok1(!tgd.mismatch); ok1(ntdb_check(ntdb, NULL, NULL) == 0); ok1(tgd.num_large < tgd.calls); diag("growing db: %u calls, %u repeats", tgd.calls, tgd.num_large); ntdb_close(ntdb); } ok1(tap_log_messages == 0); return exit_status(); } ntdb-1.0/test/tap-interface.c000066400000000000000000000001101224151530700161360ustar00rootroot00000000000000#include "tap-interface.h" unsigned tap_ok_count, tap_ok_target = -1U; ntdb-1.0/test/tap-interface.h000066400000000000000000000032641224151530700161600ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Simplistic implementation of tap interface. Copyright (C) Rusty Russell 2012 ** NOTE! The following LGPL license applies to the talloc ** library. 
This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include #include #include "no-fsync.h" #ifndef __location__ #define __TAP_STRING_LINE1__(s) #s #define __TAP_STRING_LINE2__(s) __TAP_STRING_LINE1__(s) #define __TAP_STRING_LINE3__ __TAP_STRING_LINE2__(__LINE__) #define __location__ __FILE__ ":" __TAP_STRING_LINE3__ #endif extern unsigned tap_ok_count, tap_ok_target; #define plan_tests(num) do { tap_ok_target = (num); } while(0) #define ok(e, ...) ((e) ? (printf("."), tap_ok_count++, true) : (warnx(__VA_ARGS__), false)) #define ok1(e) ok((e), "%s:%s", __location__, #e) #define pass(...) (printf("."), tap_ok_count++) #define fail(...) warnx(__VA_ARGS__) #define diag(...) do { printf(__VA_ARGS__); printf("\n"); } while(0) #define exit_status() (tap_ok_count == tap_ok_target ? 0 : 1) ntdb-1.0/tools/000077500000000000000000000000001224151530700134415ustar00rootroot00000000000000ntdb-1.0/tools/Makefile000066400000000000000000000007521224151530700151050ustar00rootroot00000000000000OBJS:=../../ntdb.o ../../hash.o ../../tally.o CFLAGS:=-I../../.. -I.. -Wall -g -O3 #-g -pg LDFLAGS:=-L../../.. 
default: ntdbtorture ntdbtool ntdbdump ntdbrestore mkntdb speed growtdb-bench ntdbdump: ntdbdump.c $(OBJS) ntdbrestore: ntdbrestore.c $(OBJS) ntdbtorture: ntdbtorture.c $(OBJS) ntdbtool: ntdbtool.c $(OBJS) mkntdb: mkntdb.c $(OBJS) speed: speed.c $(OBJS) growtdb-bench: growtdb-bench.c $(OBJS) clean: rm -f ntdbtorture ntdbdump ntdbrestore ntdbtool mkntdb speed growtdb-bench ntdb-1.0/tools/growtdb-bench.c000066400000000000000000000065121224151530700163360ustar00rootroot00000000000000#include "ntdb.h" #include #include #include #include #include #include #include #include static void logfn(struct ntdb_context *ntdb, enum ntdb_log_level level, enum NTDB_ERROR ecode, const char *message, void *data) { fprintf(stderr, "ntdb:%s:%s:%s\n", ntdb_name(ntdb), ntdb_errorstr(ecode), message); } int main(int argc, char *argv[]) { unsigned int i, j, users, groups; NTDB_DATA idxkey, idxdata; NTDB_DATA k, d, gk; char cmd[100]; struct ntdb_context *ntdb; enum NTDB_ERROR ecode; union ntdb_attribute log; if (argc != 3) { printf("Usage: growtdb-bench \n"); exit(1); } users = atoi(argv[1]); groups = atoi(argv[2]); sprintf(cmd, "cat /proc/%i/statm", getpid()); log.base.attr = NTDB_ATTRIBUTE_LOG; log.base.next = NULL; log.log.fn = logfn; ntdb = ntdb_open("/tmp/growtdb.ntdb", NTDB_DEFAULT, O_RDWR|O_CREAT|O_TRUNC, 0600, &log); idxkey.dptr = (unsigned char *)"User index"; idxkey.dsize = strlen("User index"); idxdata.dsize = 51; idxdata.dptr = calloc(idxdata.dsize, 1); if (idxdata.dptr == NULL) { fprintf(stderr, "Unable to allocate memory for idxdata.dptr\n"); return -1; } /* Create users. 
*/ k.dsize = 48; k.dptr = calloc(k.dsize, 1); if (k.dptr == NULL) { fprintf(stderr, "Unable to allocate memory for k.dptr\n"); return -1; } d.dsize = 64; d.dptr = calloc(d.dsize, 1); if (d.dptr == NULL) { fprintf(stderr, "Unable to allocate memory for d.dptr\n"); return -1; } ntdb_transaction_start(ntdb); for (i = 0; i < users; i++) { memcpy(k.dptr, &i, sizeof(i)); ecode = ntdb_store(ntdb, k, d, NTDB_INSERT); if (ecode != NTDB_SUCCESS) errx(1, "ntdb insert failed: %s", ntdb_errorstr(ecode)); /* This simulates a growing index record. */ ecode = ntdb_append(ntdb, idxkey, idxdata); if (ecode != NTDB_SUCCESS) errx(1, "ntdb append failed: %s", ntdb_errorstr(ecode)); } if ((ecode = ntdb_transaction_commit(ntdb)) != 0) errx(1, "ntdb commit1 failed: %s", ntdb_errorstr(ecode)); if ((ecode = ntdb_check(ntdb, NULL, NULL)) != 0) errx(1, "ntdb_check failed after initial insert!"); system(cmd); /* Now put them all in groups: add 32 bytes to each record for * a group. */ gk.dsize = 48; gk.dptr = calloc(k.dsize, 1); if (gk.dptr == NULL) { fprintf(stderr, "Unable to allocate memory for gk.dptr\n"); return -1; } gk.dptr[gk.dsize-1] = 1; d.dsize = 32; for (i = 0; i < groups; i++) { ntdb_transaction_start(ntdb); /* Create the "group". */ memcpy(gk.dptr, &i, sizeof(i)); ecode = ntdb_store(ntdb, gk, d, NTDB_INSERT); if (ecode != NTDB_SUCCESS) errx(1, "ntdb insert failed: %s", ntdb_errorstr(ecode)); /* Now populate it. */ for (j = 0; j < users; j++) { /* Append to the user. */ memcpy(k.dptr, &j, sizeof(j)); if ((ecode = ntdb_append(ntdb, k, d)) != 0) errx(1, "ntdb append failed: %s", ntdb_errorstr(ecode)); /* Append to the group. 
*/ if ((ecode = ntdb_append(ntdb, gk, d)) != 0) errx(1, "ntdb append failed: %s", ntdb_errorstr(ecode)); } if ((ecode = ntdb_transaction_commit(ntdb)) != 0) errx(1, "ntdb commit2 failed: %s", ntdb_errorstr(ecode)); if ((ecode = ntdb_check(ntdb, NULL, NULL)) != 0) errx(1, "ntdb_check failed after iteration %i!", i); system(cmd); } return 0; } ntdb-1.0/tools/mkntdb.c000066400000000000000000000012011224151530700150560ustar00rootroot00000000000000#include "ntdb.h" #include #include #include #include int main(int argc, char *argv[]) { unsigned int i, num_recs; struct ntdb_context *ntdb; if (argc != 3 || (num_recs = atoi(argv[2])) == 0) errx(1, "Usage: mktdb "); ntdb = ntdb_open(argv[1], NTDB_DEFAULT, O_CREAT|O_TRUNC|O_RDWR, 0600,NULL); if (!ntdb) err(1, "Opening %s", argv[1]); for (i = 0; i < num_recs; i++) { NTDB_DATA d; d.dptr = (void *)&i; d.dsize = sizeof(i); if (ntdb_store(ntdb, d, d, NTDB_INSERT) != 0) err(1, "Failed to store record %i", i); } printf("Done\n"); return 0; } ntdb-1.0/tools/ntdbbackup.c000066400000000000000000000170721224151530700157310ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. low level ntdb backup and restore utility Copyright (C) Andrew Tridgell 2002 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ /* This program is meant for backup/restore of ntdb databases. Typical usage would be: tdbbackup *.ntdb when Samba shuts down cleanly, which will make a backup of all the local databases to *.bak files. 
Then on Samba startup you would use: tdbbackup -v *.ntdb and this will check the databases for corruption and if corruption is detected then the backup will be restored. You may also like to do a backup on a regular basis while Samba is running, perhaps using cron. The reason this program is needed is to cope with power failures while Samba is running. A power failure could lead to database corruption and Samba will then not start correctly. Note that many of the databases in Samba are transient and thus don't need to be backed up, so you can optimise the above a little by only running the backup on the critical databases. */ #include "config.h" #include "ntdb.h" #include "system/filesys.h" #ifdef HAVE_GETOPT_H #include #endif static int failed; static void ntdb_log(struct ntdb_context *ntdb, enum ntdb_log_level level, enum NTDB_ERROR ecode, const char *message, void *data) { fprintf(stderr, "%s:%s\n", ntdb_errorstr(ecode), message); } static char *add_suffix(const char *name, const char *suffix) { char *ret; int len = strlen(name) + strlen(suffix) + 1; ret = (char *)malloc(len); if (!ret) { fprintf(stderr,"Out of memory!\n"); exit(1); } snprintf(ret, len, "%s%s", name, suffix); return ret; } static int copy_fn(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA dbuf, void *state) { struct ntdb_context *ntdb_new = (struct ntdb_context *)state; enum NTDB_ERROR err; err = ntdb_store(ntdb_new, key, dbuf, NTDB_INSERT); if (err) { fprintf(stderr,"Failed to insert into %s: %s\n", ntdb_name(ntdb_new), ntdb_errorstr(err)); failed = 1; return 1; } return 0; } static int test_fn(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA dbuf, void *state) { return 0; } /* carefully backup a ntdb, validating the contents and only doing the backup if its OK this function is also used for restore */ static int backup_ntdb(const char *old_name, const char *new_name) { struct ntdb_context *ntdb; struct ntdb_context *ntdb_new; char *tmp_name; struct stat st; int count1, count2; enum 
NTDB_ERROR err; union ntdb_attribute log_attr; tmp_name = add_suffix(new_name, ".tmp"); /* stat the old ntdb to find its permissions */ if (stat(old_name, &st) != 0) { perror(old_name); free(tmp_name); return 1; } log_attr.base.attr = NTDB_ATTRIBUTE_LOG; log_attr.base.next = NULL; log_attr.log.fn = ntdb_log; /* open the old ntdb */ ntdb = ntdb_open(old_name, NTDB_DEFAULT, O_RDWR, 0, &log_attr); if (!ntdb) { printf("Failed to open %s\n", old_name); free(tmp_name); return 1; } unlink(tmp_name); ntdb_new = ntdb_open(tmp_name, NTDB_DEFAULT, O_RDWR|O_CREAT|O_EXCL, st.st_mode & 0777, &log_attr); if (!ntdb_new) { perror(tmp_name); free(tmp_name); return 1; } err = ntdb_transaction_start(ntdb); if (err) { fprintf(stderr, "Failed to start transaction on old ntdb: %s\n", ntdb_errorstr(err)); ntdb_close(ntdb); ntdb_close(ntdb_new); unlink(tmp_name); free(tmp_name); return 1; } /* lock the backup ntdb so that nobody else can change it */ err = ntdb_lockall(ntdb_new); if (err) { fprintf(stderr, "Failed to lock backup ntdb: %s\n", ntdb_errorstr(err)); ntdb_close(ntdb); ntdb_close(ntdb_new); unlink(tmp_name); free(tmp_name); return 1; } failed = 0; /* traverse and copy */ count1 = ntdb_traverse(ntdb, copy_fn, (void *)ntdb_new); if (count1 < 0 || failed) { fprintf(stderr,"failed to copy %s\n", old_name); ntdb_close(ntdb); ntdb_close(ntdb_new); unlink(tmp_name); free(tmp_name); return 1; } /* close the old ntdb */ ntdb_close(ntdb); /* copy done, unlock the backup ntdb */ ntdb_unlockall(ntdb_new); #ifdef HAVE_FDATASYNC if (fdatasync(ntdb_fd(ntdb_new)) != 0) { #else if (fsync(ntdb_fd(ntdb_new)) != 0) { #endif /* not fatal */ fprintf(stderr, "failed to fsync backup file\n"); } /* close the new ntdb and re-open read-only */ ntdb_close(ntdb_new); /* we don't need the hash attr any more */ log_attr.base.next = NULL; ntdb_new = ntdb_open(tmp_name, NTDB_DEFAULT, O_RDONLY, 0, &log_attr); if (!ntdb_new) { fprintf(stderr,"failed to reopen %s\n", tmp_name); unlink(tmp_name); perror(tmp_name); 
free(tmp_name); return 1; } /* traverse the new ntdb to confirm */ count2 = ntdb_traverse(ntdb_new, test_fn, NULL); if (count2 != count1) { fprintf(stderr,"failed to copy %s\n", old_name); ntdb_close(ntdb_new); unlink(tmp_name); free(tmp_name); return 1; } /* close the new ntdb and rename it to .bak */ ntdb_close(ntdb_new); if (rename(tmp_name, new_name) != 0) { perror(new_name); free(tmp_name); return 1; } free(tmp_name); return 0; } /* verify a ntdb and if it is corrupt then restore from *.bak */ static int verify_ntdb(const char *fname, const char *bak_name) { struct ntdb_context *ntdb; int count = -1; union ntdb_attribute log_attr; log_attr.base.attr = NTDB_ATTRIBUTE_LOG; log_attr.base.next = NULL; log_attr.log.fn = ntdb_log; /* open the ntdb */ ntdb = ntdb_open(fname, NTDB_DEFAULT, O_RDONLY, 0, &log_attr); /* traverse the ntdb, then close it */ if (ntdb) { count = ntdb_traverse(ntdb, test_fn, NULL); ntdb_close(ntdb); } /* count is < 0 means an error */ if (count < 0) { printf("restoring %s\n", fname); return backup_ntdb(bak_name, fname); } printf("%s : %d records\n", fname, count); return 0; } /* see if one file is newer than another */ static int file_newer(const char *fname1, const char *fname2) { struct stat st1, st2; if (stat(fname1, &st1) != 0) { return 0; } if (stat(fname2, &st2) != 0) { return 1; } return (st1.st_mtime > st2.st_mtime); } static void usage(void) { printf("Usage: ntdbbackup [options] \n\n"); printf(" -h this help message\n"); printf(" -v verify mode (restore if corrupt)\n"); printf(" -s suffix set the backup suffix\n"); printf(" -v verify mode (restore if corrupt)\n"); } int main(int argc, char *argv[]) { int i; int ret = 0; int c; int verify = 0; const char *suffix = ".bak"; while ((c = getopt(argc, argv, "vhs:")) != -1) { switch (c) { case 'h': usage(); exit(0); case 'v': verify = 1; break; case 's': suffix = optarg; break; } } argc -= optind; argv += optind; if (argc < 1) { usage(); exit(1); } for (i=0; i. 
*/ #include "config.h" #include "ntdb.h" #ifdef HAVE_LIBREPLACE #include #include #include #else #include #include #include #include #include #include #include #endif static void print_data(NTDB_DATA d) { unsigned char *p = (unsigned char *)d.dptr; int len = d.dsize; while (len--) { if (isprint(*p) && !strchr("\"\\", *p)) { fputc(*p, stdout); } else { printf("\\%02X", *p); } p++; } } static int traverse_fn(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA dbuf, void *state) { printf("{\n"); printf("key(%d) = \"", (int)key.dsize); print_data(key); printf("\"\n"); printf("data(%d) = \"", (int)dbuf.dsize); print_data(dbuf); printf("\"\n"); printf("}\n"); return 0; } static int dump_ntdb(const char *fname, const char *keyname) { struct ntdb_context *ntdb; NTDB_DATA key, value; ntdb = ntdb_open(fname, 0, O_RDONLY, 0, NULL); if (!ntdb) { printf("Failed to open %s\n", fname); return 1; } if (!keyname) { ntdb_traverse(ntdb, traverse_fn, NULL); } else { key = ntdb_mkdata(keyname, strlen(keyname)); if (ntdb_fetch(ntdb, key, &value) != 0) { return 1; } else { print_data(value); free(value.dptr); } } return 0; } static void usage( void) { printf( "Usage: ntdbdump [options] \n\n"); printf( " -h this help message\n"); printf( " -k keyname dumps value of keyname\n"); } int main(int argc, char *argv[]) { char *fname, *keyname=NULL; int c; if (argc < 2) { printf("Usage: ntdbdump \n"); exit(1); } while ((c = getopt( argc, argv, "hk:")) != -1) { switch (c) { case 'h': usage(); exit( 0); case 'k': keyname = optarg; break; default: usage(); exit( 1); } } fname = argv[optind]; return dump_ntdb(fname, keyname); } ntdb-1.0/tools/ntdbrestore.c000066400000000000000000000114441224151530700161440ustar00rootroot00000000000000/* ntdbrestore -- construct a ntdb from (n)tdbdump output. 
Copyright (C) Rusty Russell 2012 Copyright (C) Volker Lendecke 2010 Copyright (C) Simon McVittie 2005 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ #include "config.h" #include "ntdb.h" #include #ifdef HAVE_LIBREPLACE #include #include #else #include #include #include #include #include #endif static int read_linehead(FILE *f) { int i, c; int num_bytes; char prefix[128]; while (1) { c = getc(f); if (c == EOF) { return -1; } if (c == '(') { break; } } for (i=0; idptr = (unsigned char *)malloc(size); if (d->dptr == NULL) { return -1; } d->dsize = size; for (i=0; idptr[i] = (low|high); } else { d->dptr[i] = c; } } return 0; } static int swallow(FILE *f, const char *s, int *eof) { char line[128]; if (fgets(line, sizeof(line), f) == NULL) { if (eof != NULL) { *eof = 1; } return -1; } if (strcmp(line, s) != 0) { return -1; } return 0; } static bool read_rec(FILE *f, struct ntdb_context *ntdb, int *eof) { int length; NTDB_DATA key, data; bool ret = false; enum NTDB_ERROR e; key.dptr = NULL; data.dptr = NULL; if (swallow(f, "{\n", eof) == -1) { goto fail; } length = read_linehead(f); if (length == -1) { goto fail; } if (read_data(f, &key, length) == -1) { goto fail; } if (swallow(f, "\"\n", NULL) == -1) { goto fail; } length = read_linehead(f); if (length == -1) { goto fail; } if (read_data(f, &data, length) == -1) { goto fail; } if ((swallow(f, "\"\n", NULL) == -1) || (swallow(f, "}\n", NULL) == -1)) { goto fail; } e = ntdb_store(ntdb, key, data, 
NTDB_INSERT); if (e != NTDB_SUCCESS) { fprintf(stderr, "NTDB error: %s\n", ntdb_errorstr(e)); goto fail; } ret = true; fail: free(key.dptr); free(data.dptr); return ret; } static int restore_ntdb(const char *fname, unsigned int hsize) { struct ntdb_context *ntdb; union ntdb_attribute hashsize; hashsize.base.attr = NTDB_ATTRIBUTE_HASHSIZE; hashsize.base.next = NULL; hashsize.hashsize.size = hsize; ntdb = ntdb_open(fname, 0, O_RDWR|O_CREAT|O_EXCL, 0666, hsize ? &hashsize : NULL); if (!ntdb) { perror("ntdb_open"); fprintf(stderr, "Failed to open %s\n", fname); return 1; } while (1) { int eof = 0; if (!read_rec(stdin, ntdb, &eof)) { if (eof) { break; } return 1; } } if (ntdb_close(ntdb)) { fprintf(stderr, "Error closing ntdb\n"); return 1; } fprintf(stderr, "EOF\n"); return 0; } int main(int argc, char *argv[]) { unsigned int hsize = 0; const char *execname = argv[0]; if (argv[1] && strcmp(argv[1], "-h") == 0) { if (argv[2]) { hsize = atoi(argv[2]); } if (hsize == 0) { fprintf(stderr, "-h requires a integer value" " (eg. 128 or 131072)\n"); exit(1); } argv += 2; argc -= 2; } if (argc != 2) { printf("Usage: %s [-h ] dbname < tdbdump_output\n", execname); exit(1); } return restore_ntdb(argv[1], hsize); } ntdb-1.0/tools/ntdbtool.c000066400000000000000000000431031224151530700154330ustar00rootroot00000000000000/* Unix SMB/CIFS implementation. Samba database functions Copyright (C) Andrew Tridgell 1999-2000 Copyright (C) Paul `Rusty' Russell 2000 Copyright (C) Jeremy Allison 2000 Copyright (C) Andrew Esh 2001 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program. If not, see . */ #include "config.h" #include "ntdb.h" #ifdef HAVE_LIBREPLACE #include #include #include #include #else #include #include #include #include #include #include #include #include #include #include #endif static int do_command(void); const char *cmdname; char *arg1, *arg2; size_t arg1len, arg2len; int bIterate = 0; char *line; NTDB_DATA iterate_kbuf; char cmdline[1024]; static int disable_mmap; enum commands { CMD_CREATE_NTDB, CMD_OPEN_NTDB, CMD_TRANSACTION_START, CMD_TRANSACTION_COMMIT, CMD_TRANSACTION_CANCEL, CMD_ERASE, CMD_DUMP, CMD_INSERT, CMD_MOVE, CMD_STORE, CMD_SHOW, CMD_KEYS, CMD_HEXKEYS, CMD_DELETE, #if 0 CMD_LIST_HASH_FREE, CMD_LIST_FREE, #endif CMD_INFO, CMD_MMAP, CMD_SPEED, CMD_FIRST, CMD_NEXT, CMD_SYSTEM, CMD_CHECK, CMD_QUIT, CMD_HELP }; typedef struct { const char *name; enum commands cmd; } COMMAND_TABLE; COMMAND_TABLE cmd_table[] = { {"create", CMD_CREATE_NTDB}, {"open", CMD_OPEN_NTDB}, #if 0 {"transaction_start", CMD_TRANSACTION_START}, {"transaction_commit", CMD_TRANSACTION_COMMIT}, {"transaction_cancel", CMD_TRANSACTION_CANCEL}, #endif {"erase", CMD_ERASE}, {"dump", CMD_DUMP}, {"insert", CMD_INSERT}, {"move", CMD_MOVE}, {"store", CMD_STORE}, {"show", CMD_SHOW}, {"keys", CMD_KEYS}, {"hexkeys", CMD_HEXKEYS}, {"delete", CMD_DELETE}, #if 0 {"list", CMD_LIST_HASH_FREE}, {"free", CMD_LIST_FREE}, #endif {"info", CMD_INFO}, {"speed", CMD_SPEED}, {"mmap", CMD_MMAP}, {"first", CMD_FIRST}, {"1", CMD_FIRST}, {"next", CMD_NEXT}, {"n", CMD_NEXT}, {"check", CMD_CHECK}, {"quit", CMD_QUIT}, {"q", CMD_QUIT}, {"!", CMD_SYSTEM}, {NULL, CMD_HELP} }; struct timeval tp1,tp2; static void _start_timer(void) { gettimeofday(&tp1,NULL); } static double _end_timer(void) { gettimeofday(&tp2,NULL); return((tp2.tv_sec - tp1.tv_sec) + (tp2.tv_usec - tp1.tv_usec)*1.0e-6); } static void ntdb_log(struct ntdb_context *ntdb, enum ntdb_log_level level, enum NTDB_ERROR ecode, const 
char *message, void *data) { fprintf(stderr, "ntdb:%s:%s:%s\n", ntdb_name(ntdb), ntdb_errorstr(ecode), message); } /* a ntdb tool for manipulating a ntdb database */ static struct ntdb_context *ntdb; static int print_rec(struct ntdb_context *the_ntdb, NTDB_DATA key, NTDB_DATA dbuf, void *state); static int print_key(struct ntdb_context *the_ntdb, NTDB_DATA key, NTDB_DATA dbuf, void *state); static int print_hexkey(struct ntdb_context *the_ntdb, NTDB_DATA key, NTDB_DATA dbuf, void *state); static void print_asc(const char *buf,int len) { int i; /* We're probably printing ASCII strings so don't try to display the trailing NULL character. */ if (buf[len - 1] == 0) len--; for (i=0;i8) printf(" "); while (n--) printf(" "); n = i%16; if (n > 8) n = 8; print_asc(&buf[i-(i%16)],n); printf(" "); n = (i%16) - n; if (n>0) print_asc(&buf[i-n],n); printf("\n"); } } static void help(void) { printf("\n" "tdbtool: \n" " create dbname : create a database\n" " open dbname : open an existing database\n" " openjh dbname : open an existing database (jenkins hash)\n" " transaction_start : start a transaction\n" " transaction_commit : commit a transaction\n" " transaction_cancel : cancel a transaction\n" " erase : erase the database\n" " dump : dump the database as strings\n" " keys : dump the database keys as strings\n" " hexkeys : dump the database keys as hex values\n" " info : print summary info about the database\n" " insert key data : insert a record\n" " move key file : move a record to a destination ntdb\n" " store key data : store a record (replace)\n" " show key : show a record by key\n" " delete key : delete a record by key\n" #if 0 " list : print the database hash table and freelist\n" " free : print the database freelist\n" #endif " check : check the integrity of an opened database\n" " speed : perform speed tests on the database\n" " ! 
command : execute system command\n" " 1 | first : print the first record\n" " n | next : print the next record\n" " q | quit : terminate\n" " \\n : repeat 'next' command\n" "\n"); } static void terror(enum NTDB_ERROR err, const char *why) { if (err != NTDB_SUCCESS) printf("%s:%s\n", ntdb_errorstr(err), why); else printf("%s\n", why); } static void create_ntdb(const char *tdbname) { union ntdb_attribute log_attr; log_attr.base.attr = NTDB_ATTRIBUTE_LOG; log_attr.base.next = NULL; log_attr.log.fn = ntdb_log; if (ntdb) ntdb_close(ntdb); ntdb = ntdb_open(tdbname, (disable_mmap?NTDB_NOMMAP:0), O_RDWR | O_CREAT | O_TRUNC, 0600, &log_attr); if (!ntdb) { printf("Could not create %s: %s\n", tdbname, strerror(errno)); } } static void open_ntdb(const char *tdbname) { union ntdb_attribute log_attr; log_attr.base.attr = NTDB_ATTRIBUTE_LOG; log_attr.base.next = NULL; log_attr.log.fn = ntdb_log; if (ntdb) ntdb_close(ntdb); ntdb = ntdb_open(tdbname, disable_mmap?NTDB_NOMMAP:0, O_RDWR, 0600, &log_attr); if (!ntdb) { printf("Could not open %s: %s\n", tdbname, strerror(errno)); } } static void insert_ntdb(char *keyname, size_t keylen, char* data, size_t datalen) { NTDB_DATA key, dbuf; enum NTDB_ERROR ecode; if ((keyname == NULL) || (keylen == 0)) { terror(NTDB_SUCCESS, "need key"); return; } key.dptr = (unsigned char *)keyname; key.dsize = keylen; dbuf.dptr = (unsigned char *)data; dbuf.dsize = datalen; ecode = ntdb_store(ntdb, key, dbuf, NTDB_INSERT); if (ecode) { terror(ecode, "insert failed"); } } static void store_ntdb(char *keyname, size_t keylen, char* data, size_t datalen) { NTDB_DATA key, dbuf; enum NTDB_ERROR ecode; if ((keyname == NULL) || (keylen == 0)) { terror(NTDB_SUCCESS, "need key"); return; } if ((data == NULL) || (datalen == 0)) { terror(NTDB_SUCCESS, "need data"); return; } key.dptr = (unsigned char *)keyname; key.dsize = keylen; dbuf.dptr = (unsigned char *)data; dbuf.dsize = datalen; printf("Storing key:\n"); print_rec(ntdb, key, dbuf, NULL); ecode = 
ntdb_store(ntdb, key, dbuf, NTDB_REPLACE); if (ecode) { terror(ecode, "store failed"); } } static void show_ntdb(char *keyname, size_t keylen) { NTDB_DATA key, dbuf; enum NTDB_ERROR ecode; if ((keyname == NULL) || (keylen == 0)) { terror(NTDB_SUCCESS, "need key"); return; } key.dptr = (unsigned char *)keyname; key.dsize = keylen; ecode = ntdb_fetch(ntdb, key, &dbuf); if (ecode) { terror(ecode, "fetch failed"); return; } print_rec(ntdb, key, dbuf, NULL); free( dbuf.dptr ); } static void delete_ntdb(char *keyname, size_t keylen) { NTDB_DATA key; enum NTDB_ERROR ecode; if ((keyname == NULL) || (keylen == 0)) { terror(NTDB_SUCCESS, "need key"); return; } key.dptr = (unsigned char *)keyname; key.dsize = keylen; ecode = ntdb_delete(ntdb, key); if (ecode) { terror(ecode, "delete failed"); } } static void move_rec(char *keyname, size_t keylen, char* tdbname) { NTDB_DATA key, dbuf; struct ntdb_context *dst_ntdb; enum NTDB_ERROR ecode; if ((keyname == NULL) || (keylen == 0)) { terror(NTDB_SUCCESS, "need key"); return; } if ( !tdbname ) { terror(NTDB_SUCCESS, "need destination ntdb name"); return; } key.dptr = (unsigned char *)keyname; key.dsize = keylen; ecode = ntdb_fetch(ntdb, key, &dbuf); if (ecode) { terror(ecode, "fetch failed"); return; } print_rec(ntdb, key, dbuf, NULL); dst_ntdb = ntdb_open(tdbname, 0, O_RDWR, 0600, NULL); if ( !dst_ntdb ) { terror(NTDB_SUCCESS, "unable to open destination ntdb"); return; } ecode = ntdb_store( dst_ntdb, key, dbuf, NTDB_REPLACE); if (ecode) terror(ecode, "failed to move record"); else printf("record moved\n"); ntdb_close( dst_ntdb ); } static int print_rec(struct ntdb_context *the_ntdb, NTDB_DATA key, NTDB_DATA dbuf, void *state) { printf("\nkey %d bytes\n", (int)key.dsize); print_asc((const char *)key.dptr, key.dsize); printf("\ndata %d bytes\n", (int)dbuf.dsize); print_data((const char *)dbuf.dptr, dbuf.dsize); return 0; } static int print_key(struct ntdb_context *the_ntdb, NTDB_DATA key, NTDB_DATA dbuf, void *state) { printf("key 
%d bytes: ", (int)key.dsize); print_asc((const char *)key.dptr, key.dsize); printf("\n"); return 0; } static int print_hexkey(struct ntdb_context *the_ntdb, NTDB_DATA key, NTDB_DATA dbuf, void *state) { printf("key %d bytes\n", (int)key.dsize); print_data((const char *)key.dptr, key.dsize); printf("\n"); return 0; } static int total_bytes; static int traverse_fn(struct ntdb_context *the_ntdb, NTDB_DATA key, NTDB_DATA dbuf, void *state) { total_bytes += dbuf.dsize; return 0; } static void info_ntdb(void) { enum NTDB_ERROR ecode; char *summary; ecode = ntdb_summary(ntdb, NTDB_SUMMARY_HISTOGRAMS, &summary); if (ecode) { terror(ecode, "Getting summary"); } else { printf("%s", summary); free(summary); } } static void speed_ntdb(const char *tlimit) { unsigned timelimit = tlimit?atoi(tlimit):0; double t; int ops; if (timelimit == 0) timelimit = 5; ops = 0; printf("Testing store speed for %u seconds\n", timelimit); _start_timer(); do { long int r = random(); NTDB_DATA key, dbuf; key = ntdb_mkdata("store test", strlen("store test")); dbuf.dptr = (unsigned char *)&r; dbuf.dsize = sizeof(r); ntdb_store(ntdb, key, dbuf, NTDB_REPLACE); t = _end_timer(); ops++; } while (t < timelimit); printf("%10.3f ops/sec\n", ops/t); ops = 0; printf("Testing fetch speed for %u seconds\n", timelimit); _start_timer(); do { long int r = random(); NTDB_DATA key, dbuf; key = ntdb_mkdata("store test", strlen("store test")); dbuf.dptr = (unsigned char *)&r; dbuf.dsize = sizeof(r); ntdb_fetch(ntdb, key, &dbuf); t = _end_timer(); ops++; } while (t < timelimit); printf("%10.3f ops/sec\n", ops/t); ops = 0; printf("Testing transaction speed for %u seconds\n", timelimit); _start_timer(); do { long int r = random(); NTDB_DATA key, dbuf; key = ntdb_mkdata("transaction test", strlen("transaction test")); dbuf.dptr = (unsigned char *)&r; dbuf.dsize = sizeof(r); ntdb_transaction_start(ntdb); ntdb_store(ntdb, key, dbuf, NTDB_REPLACE); ntdb_transaction_commit(ntdb); t = _end_timer(); ops++; } while (t < 
timelimit); printf("%10.3f ops/sec\n", ops/t); ops = 0; printf("Testing traverse speed for %u seconds\n", timelimit); _start_timer(); do { ntdb_traverse(ntdb, traverse_fn, NULL); t = _end_timer(); ops++; } while (t < timelimit); printf("%10.3f ops/sec\n", ops/t); } static void toggle_mmap(void) { disable_mmap = !disable_mmap; if (disable_mmap) { printf("mmap is disabled\n"); } else { printf("mmap is enabled\n"); } } static char *ntdb_getline(const char *prompt) { static char thisline[1024]; char *p; fputs(prompt, stdout); thisline[0] = 0; p = fgets(thisline, sizeof(thisline)-1, stdin); if (p) p = strchr(p, '\n'); if (p) *p = 0; return p?thisline:NULL; } static int do_delete_fn(struct ntdb_context *the_ntdb, NTDB_DATA key, NTDB_DATA dbuf, void *state) { return ntdb_delete(the_ntdb, key); } static void first_record(struct ntdb_context *the_ntdb, NTDB_DATA *pkey) { NTDB_DATA dbuf; enum NTDB_ERROR ecode; ecode = ntdb_firstkey(the_ntdb, pkey); if (!ecode) ecode = ntdb_fetch(the_ntdb, *pkey, &dbuf); if (ecode) terror(ecode, "fetch failed"); else { print_rec(the_ntdb, *pkey, dbuf, NULL); } } static void next_record(struct ntdb_context *the_ntdb, NTDB_DATA *pkey) { NTDB_DATA dbuf; enum NTDB_ERROR ecode; ecode = ntdb_nextkey(the_ntdb, pkey); if (!ecode) ecode = ntdb_fetch(the_ntdb, *pkey, &dbuf); if (ecode) terror(ecode, "fetch failed"); else print_rec(the_ntdb, *pkey, dbuf, NULL); } static void check_db(struct ntdb_context *the_ntdb) { if (!the_ntdb) { printf("Error: No database opened!\n"); } else { if (ntdb_check(the_ntdb, NULL, NULL) != 0) printf("Integrity check for the opened database failed.\n"); else printf("Database integrity is OK.\n"); } } static int do_command(void) { COMMAND_TABLE *ctp = cmd_table; enum commands mycmd = CMD_HELP; int cmd_len; if (cmdname && strlen(cmdname) == 0) { mycmd = CMD_NEXT; } else { while (ctp->name) { cmd_len = strlen(ctp->name); if (strncmp(ctp->name,cmdname,cmd_len) == 0) { mycmd = ctp->cmd; break; } ctp++; } } switch (mycmd) { case 
CMD_CREATE_NTDB: bIterate = 0; create_ntdb(arg1); return 0; case CMD_OPEN_NTDB: bIterate = 0; open_ntdb(arg1); return 0; case CMD_SYSTEM: /* Shell command */ if (system(arg1) == -1) { terror(NTDB_SUCCESS, "system() call failed\n"); } return 0; case CMD_QUIT: return 1; default: /* all the rest require a open database */ if (!ntdb) { bIterate = 0; terror(NTDB_SUCCESS, "database not open"); help(); return 0; } switch (mycmd) { case CMD_TRANSACTION_START: bIterate = 0; ntdb_transaction_start(ntdb); return 0; case CMD_TRANSACTION_COMMIT: bIterate = 0; ntdb_transaction_commit(ntdb); return 0; case CMD_TRANSACTION_CANCEL: bIterate = 0; ntdb_transaction_cancel(ntdb); return 0; case CMD_ERASE: bIterate = 0; ntdb_traverse(ntdb, do_delete_fn, NULL); return 0; case CMD_DUMP: bIterate = 0; ntdb_traverse(ntdb, print_rec, NULL); return 0; case CMD_INSERT: bIterate = 0; insert_ntdb(arg1, arg1len,arg2,arg2len); return 0; case CMD_MOVE: bIterate = 0; move_rec(arg1,arg1len,arg2); return 0; case CMD_STORE: bIterate = 0; store_ntdb(arg1,arg1len,arg2,arg2len); return 0; case CMD_SHOW: bIterate = 0; show_ntdb(arg1, arg1len); return 0; case CMD_KEYS: ntdb_traverse(ntdb, print_key, NULL); return 0; case CMD_HEXKEYS: ntdb_traverse(ntdb, print_hexkey, NULL); return 0; case CMD_DELETE: bIterate = 0; delete_ntdb(arg1,arg1len); return 0; #if 0 case CMD_LIST_HASH_FREE: ntdb_dump_all(ntdb); return 0; case CMD_LIST_FREE: ntdb_printfreelist(ntdb); return 0; #endif case CMD_INFO: info_ntdb(); return 0; case CMD_SPEED: speed_ntdb(arg1); return 0; case CMD_MMAP: toggle_mmap(); return 0; case CMD_FIRST: bIterate = 1; first_record(ntdb, &iterate_kbuf); return 0; case CMD_NEXT: if (bIterate) next_record(ntdb, &iterate_kbuf); return 0; case CMD_CHECK: check_db(ntdb); return 0; case CMD_HELP: help(); return 0; case CMD_CREATE_NTDB: case CMD_OPEN_NTDB: case CMD_SYSTEM: case CMD_QUIT: /* * unhandled commands. cases included here to avoid compiler * warnings. 
*/ return 0; } } return 0; } static char *convert_string(char *instring, size_t *sizep) { size_t length = 0; char *outp, *inp; char temp[3]; outp = inp = instring; while (*inp) { if (*inp == '\\') { inp++; if (*inp && strchr("0123456789abcdefABCDEF",(int)*inp)) { temp[0] = *inp++; temp[1] = '\0'; if (*inp && strchr("0123456789abcdefABCDEF",(int)*inp)) { temp[1] = *inp++; temp[2] = '\0'; } *outp++ = (char)strtol((const char *)temp,NULL,16); } else { *outp++ = *inp++; } } else { *outp++ = *inp++; } length++; } *sizep = length; return instring; } int main(int argc, char *argv[]) { cmdname = ""; arg1 = NULL; arg1len = 0; arg2 = NULL; arg2len = 0; if (argv[1]) { cmdname = "open"; arg1 = argv[1]; do_command(); cmdname = ""; arg1 = NULL; } switch (argc) { case 1: case 2: /* Interactive mode */ while ((cmdname = ntdb_getline("ntdb> "))) { arg2 = arg1 = NULL; if ((arg1 = strchr((const char *)cmdname,' ')) != NULL) { arg1++; arg2 = arg1; while (*arg2) { if (*arg2 == ' ') { *arg2++ = '\0'; break; } if ((*arg2++ == '\\') && (*arg2 == ' ')) { arg2++; } } } if (arg1) arg1 = convert_string(arg1,&arg1len); if (arg2) arg2 = convert_string(arg2,&arg2len); if (do_command()) break; } break; case 5: arg2 = convert_string(argv[4],&arg2len); case 4: arg1 = convert_string(argv[3],&arg1len); case 3: cmdname = argv[2]; default: do_command(); break; } if (ntdb) ntdb_close(ntdb); return 0; } ntdb-1.0/tools/ntdbtorture.c000066400000000000000000000267701224151530700161750ustar00rootroot00000000000000/* this tests ntdb by doing lots of ops from several simultaneous writers - that stresses the locking code. 
*/ #include "config.h" #include "ntdb.h" #include #ifdef HAVE_LIBREPLACE #include #else #include #include #include #include #include #include #include #include #include #include #include #endif //#define REOPEN_PROB 30 #define DELETE_PROB 8 #define STORE_PROB 4 #define APPEND_PROB 6 #define TRANSACTION_PROB 10 #define TRANSACTION_PREPARE_PROB 2 #define LOCKSTORE_PROB 5 #define TRAVERSE_PROB 20 #define TRAVERSE_MOD_PROB 100 #define TRAVERSE_ABORT_PROB 500 #define CULL_PROB 100 #define KEYLEN 3 #define DATALEN 100 static struct ntdb_context *db; static int in_transaction; static int in_traverse; static int error_count; #if TRANSACTION_PROB static int always_transaction = 0; #endif static int loopnum; static int count_pipe; static union ntdb_attribute log_attr; static union ntdb_attribute seed_attr; static union ntdb_attribute hsize_attr; static void ntdb_log(struct ntdb_context *ntdb, enum ntdb_log_level level, enum NTDB_ERROR ecode, const char *message, void *data) { printf("ntdb:%s:%s:%s\n", ntdb_name(ntdb), ntdb_errorstr(ecode), message); fflush(stdout); #if 0 { char str[200]; signal(SIGUSR1, SIG_IGN); sprintf(str,"xterm -e gdb /proc/%u/exe %u", (unsigned int)getpid(), (unsigned int)getpid()); system(str); } #endif } #include "../private.h" static void segv_handler(int sig, siginfo_t *info, void *p) { char string[100]; sprintf(string, "%u: death at %p (map_ptr %p, map_size %zu)\n", (unsigned int)getpid(), info->si_addr, db->file->map_ptr, (size_t)db->file->map_size); if (write(2, string, strlen(string)) > 0) sleep(60); _exit(11); } static void warn_on_err(enum NTDB_ERROR e, struct ntdb_context *ntdb, const char *why) { if (e != NTDB_SUCCESS) { fprintf(stderr, "%u:%s:%s\n", (unsigned int)getpid(), why, ntdb ? 
ntdb_errorstr(e) : "(no ntdb)"); error_count++; } } static char *randbuf(int len) { char *buf; int i; buf = (char *)malloc(len+1); if (buf == NULL) { perror("randbuf: unable to allocate memory for buffer.\n"); exit(1); } for (i=0;i #include #include #include #include #include #include #include #include #include #include #include "ntdb.h" /* Nanoseconds per operation */ static size_t normalize(const struct timeval *start, const struct timeval *stop, unsigned int num) { struct timeval diff; timersub(stop, start, &diff); /* Floating point is more accurate here. */ return (double)(diff.tv_sec * 1000000 + diff.tv_usec) / num * 1000; } static size_t file_size(void) { struct stat st; if (stat("/tmp/speed.ntdb", &st) != 0) return -1; return st.st_size; } static int count_record(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA data, void *p) { int *total = p; *total += *(int *)data.dptr; return 0; } static void dump_and_clear_stats(struct ntdb_context **ntdb, int flags, union ntdb_attribute *attr) { union ntdb_attribute stats; enum NTDB_ERROR ecode; stats.base.attr = NTDB_ATTRIBUTE_STATS; stats.stats.size = sizeof(stats.stats); ecode = ntdb_get_attribute(*ntdb, &stats); if (ecode != NTDB_SUCCESS) errx(1, "Getting stats: %s", ntdb_errorstr(ecode)); printf("allocs = %llu\n", (unsigned long long)stats.stats.allocs); printf(" alloc_subhash = %llu\n", (unsigned long long)stats.stats.alloc_subhash); printf(" alloc_chain = %llu\n", (unsigned long long)stats.stats.alloc_chain); printf(" alloc_bucket_exact = %llu\n", (unsigned long long)stats.stats.alloc_bucket_exact); printf(" alloc_bucket_max = %llu\n", (unsigned long long)stats.stats.alloc_bucket_max); printf(" alloc_leftover = %llu\n", (unsigned long long)stats.stats.alloc_leftover); printf(" alloc_coalesce_tried = %llu\n", (unsigned long long)stats.stats.alloc_coalesce_tried); printf(" alloc_coalesce_iterate_clash = %llu\n", (unsigned long long)stats.stats.alloc_coalesce_iterate_clash); printf(" alloc_coalesce_lockfail = 
%llu\n", (unsigned long long)stats.stats.alloc_coalesce_lockfail); printf(" alloc_coalesce_race = %llu\n", (unsigned long long)stats.stats.alloc_coalesce_race); printf(" alloc_coalesce_succeeded = %llu\n", (unsigned long long)stats.stats.alloc_coalesce_succeeded); printf(" alloc_coalesce_num_merged = %llu\n", (unsigned long long)stats.stats.alloc_coalesce_num_merged); printf("compares = %llu\n", (unsigned long long)stats.stats.compares); printf(" compare_wrong_offsetbits = %llu\n", (unsigned long long)stats.stats.compare_wrong_offsetbits); printf(" compare_wrong_keylen = %llu\n", (unsigned long long)stats.stats.compare_wrong_keylen); printf(" compare_wrong_rechash = %llu\n", (unsigned long long)stats.stats.compare_wrong_rechash); printf(" compare_wrong_keycmp = %llu\n", (unsigned long long)stats.stats.compare_wrong_keycmp); printf("transactions = %llu\n", (unsigned long long)stats.stats.transactions); printf(" transaction_cancel = %llu\n", (unsigned long long)stats.stats.transaction_cancel); printf(" transaction_nest = %llu\n", (unsigned long long)stats.stats.transaction_nest); printf(" transaction_expand_file = %llu\n", (unsigned long long)stats.stats.transaction_expand_file); printf(" transaction_read_direct = %llu\n", (unsigned long long)stats.stats.transaction_read_direct); printf(" transaction_read_direct_fail = %llu\n", (unsigned long long)stats.stats.transaction_read_direct_fail); printf(" transaction_write_direct = %llu\n", (unsigned long long)stats.stats.transaction_write_direct); printf(" transaction_write_direct_fail = %llu\n", (unsigned long long)stats.stats.transaction_write_direct_fail); printf("expands = %llu\n", (unsigned long long)stats.stats.expands); printf("frees = %llu\n", (unsigned long long)stats.stats.frees); printf("locks = %llu\n", (unsigned long long)stats.stats.locks); printf(" lock_lowlevel = %llu\n", (unsigned long long)stats.stats.lock_lowlevel); printf(" lock_nonblock = %llu\n", (unsigned long long)stats.stats.lock_nonblock); 
printf(" lock_nonblock_fail = %llu\n", (unsigned long long)stats.stats.lock_nonblock_fail); /* Now clear. */ ntdb_close(*ntdb); *ntdb = ntdb_open("/tmp/speed.ntdb", flags, O_RDWR, 0, attr); } static void ntdb_log(struct ntdb_context *ntdb, enum ntdb_log_level level, enum NTDB_ERROR ecode, const char *message, void *data) { fprintf(stderr, "ntdb:%s:%s:%s\n", ntdb_name(ntdb), ntdb_errorstr(ecode), message); } int main(int argc, char *argv[]) { unsigned int i, j, num = 1000, stage = 0, stopat = -1; int flags = NTDB_DEFAULT; bool transaction = false, summary = false; NTDB_DATA key, data; struct ntdb_context *ntdb; struct timeval start, stop; union ntdb_attribute seed, log; bool do_stats = false; enum NTDB_ERROR ecode; /* Try to keep benchmarks even. */ seed.base.attr = NTDB_ATTRIBUTE_SEED; seed.base.next = NULL; seed.seed.seed = 0; log.base.attr = NTDB_ATTRIBUTE_LOG; log.base.next = &seed; log.log.fn = ntdb_log; if (argv[1] && strcmp(argv[1], "--internal") == 0) { flags = NTDB_INTERNAL; argc--; argv++; } if (argv[1] && strcmp(argv[1], "--transaction") == 0) { transaction = true; argc--; argv++; } if (argv[1] && strcmp(argv[1], "--no-sync") == 0) { flags |= NTDB_NOSYNC; argc--; argv++; } if (argv[1] && strcmp(argv[1], "--summary") == 0) { summary = true; argc--; argv++; } if (argv[1] && strcmp(argv[1], "--stats") == 0) { do_stats = true; argc--; argv++; } ntdb = ntdb_open("/tmp/speed.ntdb", flags, O_RDWR|O_CREAT|O_TRUNC, 0600, &log); if (!ntdb) err(1, "Opening /tmp/speed.ntdb"); key.dptr = (void *)&i; key.dsize = sizeof(i); data = key; if (argv[1]) { num = atoi(argv[1]); argv++; argc--; } if (argv[1]) { stopat = atoi(argv[1]); argv++; argc--; } /* Add 1000 records. 
*/ printf("Adding %u records: ", num); fflush(stdout); if (transaction && (ecode = ntdb_transaction_start(ntdb))) errx(1, "starting transaction: %s", ntdb_errorstr(ecode)); gettimeofday(&start, NULL); for (i = 0; i < num; i++) if ((ecode = ntdb_store(ntdb, key, data, NTDB_INSERT)) != 0) errx(1, "Inserting key %u in ntdb: %s", i, ntdb_errorstr(ecode)); gettimeofday(&stop, NULL); if (transaction && (ecode = ntdb_transaction_commit(ntdb))) errx(1, "committing transaction: %s", ntdb_errorstr(ecode)); printf(" %zu ns (%zu bytes)\n", normalize(&start, &stop, num), file_size()); if (ntdb_check(ntdb, NULL, NULL)) errx(1, "ntdb_check failed!"); if (summary) { char *sumstr = NULL; ntdb_summary(ntdb, NTDB_SUMMARY_HISTOGRAMS, &sumstr); printf("%s\n", sumstr); free(sumstr); } if (do_stats) dump_and_clear_stats(&ntdb, flags, &log); if (++stage == stopat) exit(0); /* Finding 1000 records. */ printf("Finding %u records: ", num); fflush(stdout); if (transaction && (ecode = ntdb_transaction_start(ntdb))) errx(1, "starting transaction: %s", ntdb_errorstr(ecode)); gettimeofday(&start, NULL); for (i = 0; i < num; i++) { NTDB_DATA dbuf; if ((ecode = ntdb_fetch(ntdb, key, &dbuf)) != NTDB_SUCCESS || *(int *)dbuf.dptr != i) { errx(1, "Fetching key %u in ntdb gave %u", i, ecode ? ecode : *(int *)dbuf.dptr); } } gettimeofday(&stop, NULL); if (transaction && (ecode = ntdb_transaction_commit(ntdb))) errx(1, "committing transaction: %s", ntdb_errorstr(ecode)); printf(" %zu ns (%zu bytes)\n", normalize(&start, &stop, num), file_size()); if (ntdb_check(ntdb, NULL, NULL)) errx(1, "ntdb_check failed!"); if (summary) { char *sumstr = NULL; ntdb_summary(ntdb, NTDB_SUMMARY_HISTOGRAMS, &sumstr); printf("%s\n", sumstr); free(sumstr); } if (do_stats) dump_and_clear_stats(&ntdb, flags, &log); if (++stage == stopat) exit(0); /* Missing 1000 records. 
*/ printf("Missing %u records: ", num); fflush(stdout); if (transaction && (ecode = ntdb_transaction_start(ntdb))) errx(1, "starting transaction: %s", ntdb_errorstr(ecode)); gettimeofday(&start, NULL); for (i = num; i < num*2; i++) { NTDB_DATA dbuf; ecode = ntdb_fetch(ntdb, key, &dbuf); if (ecode != NTDB_ERR_NOEXIST) errx(1, "Fetching key %u in ntdb gave %s", i, ntdb_errorstr(ecode)); } gettimeofday(&stop, NULL); if (transaction && (ecode = ntdb_transaction_commit(ntdb))) errx(1, "committing transaction: %s", ntdb_errorstr(ecode)); printf(" %zu ns (%zu bytes)\n", normalize(&start, &stop, num), file_size()); if (ntdb_check(ntdb, NULL, NULL)) errx(1, "ntdb_check failed!"); if (summary) { char *sumstr = NULL; ntdb_summary(ntdb, NTDB_SUMMARY_HISTOGRAMS, &sumstr); printf("%s\n", sumstr); free(sumstr); } if (do_stats) dump_and_clear_stats(&ntdb, flags, &log); if (++stage == stopat) exit(0); /* Traverse 1000 records. */ printf("Traversing %u records: ", num); fflush(stdout); if (transaction && (ecode = ntdb_transaction_start(ntdb))) errx(1, "starting transaction: %s", ntdb_errorstr(ecode)); i = 0; gettimeofday(&start, NULL); if (ntdb_traverse(ntdb, count_record, &i) != num) errx(1, "Traverse returned wrong number of records"); if (i != (num - 1) * (num / 2)) errx(1, "Traverse tallied to %u", i); gettimeofday(&stop, NULL); if (transaction && (ecode = ntdb_transaction_commit(ntdb))) errx(1, "committing transaction: %s", ntdb_errorstr(ecode)); printf(" %zu ns (%zu bytes)\n", normalize(&start, &stop, num), file_size()); if (ntdb_check(ntdb, NULL, NULL)) errx(1, "ntdb_check failed!"); if (summary) { char *sumstr = NULL; ntdb_summary(ntdb, NTDB_SUMMARY_HISTOGRAMS, &sumstr); printf("%s\n", sumstr); free(sumstr); } if (do_stats) dump_and_clear_stats(&ntdb, flags, &log); if (++stage == stopat) exit(0); /* Delete 1000 records (not in order). 
*/ printf("Deleting %u records: ", num); fflush(stdout); if (transaction && (ecode = ntdb_transaction_start(ntdb))) errx(1, "starting transaction: %s", ntdb_errorstr(ecode)); gettimeofday(&start, NULL); for (j = 0; j < num; j++) { i = (j + 100003) % num; if ((ecode = ntdb_delete(ntdb, key)) != NTDB_SUCCESS) errx(1, "Deleting key %u in ntdb: %s", i, ntdb_errorstr(ecode)); } gettimeofday(&stop, NULL); if (transaction && (ecode = ntdb_transaction_commit(ntdb))) errx(1, "committing transaction: %s", ntdb_errorstr(ecode)); printf(" %zu ns (%zu bytes)\n", normalize(&start, &stop, num), file_size()); if (ntdb_check(ntdb, NULL, NULL)) errx(1, "ntdb_check failed!"); if (summary) { char *sumstr = NULL; ntdb_summary(ntdb, NTDB_SUMMARY_HISTOGRAMS, &sumstr); printf("%s\n", sumstr); free(sumstr); } if (do_stats) dump_and_clear_stats(&ntdb, flags, &log); if (++stage == stopat) exit(0); /* Re-add 1000 records (not in order). */ printf("Re-adding %u records: ", num); fflush(stdout); if (transaction && (ecode = ntdb_transaction_start(ntdb))) errx(1, "starting transaction: %s", ntdb_errorstr(ecode)); gettimeofday(&start, NULL); for (j = 0; j < num; j++) { i = (j + 100003) % num; if ((ecode = ntdb_store(ntdb, key, data, NTDB_INSERT)) != 0) errx(1, "Inserting key %u in ntdb: %s", i, ntdb_errorstr(ecode)); } gettimeofday(&stop, NULL); if (transaction && (ecode = ntdb_transaction_commit(ntdb))) errx(1, "committing transaction: %s", ntdb_errorstr(ecode)); printf(" %zu ns (%zu bytes)\n", normalize(&start, &stop, num), file_size()); if (ntdb_check(ntdb, NULL, NULL)) errx(1, "ntdb_check failed!"); if (summary) { char *sumstr = NULL; ntdb_summary(ntdb, NTDB_SUMMARY_HISTOGRAMS, &sumstr); printf("%s\n", sumstr); free(sumstr); } if (do_stats) dump_and_clear_stats(&ntdb, flags, &log); if (++stage == stopat) exit(0); /* Append 1000 records. 
*/ if (transaction && (ecode = ntdb_transaction_start(ntdb))) errx(1, "starting transaction: %s", ntdb_errorstr(ecode)); printf("Appending %u records: ", num); fflush(stdout); gettimeofday(&start, NULL); for (i = 0; i < num; i++) if ((ecode = ntdb_append(ntdb, key, data)) != NTDB_SUCCESS) errx(1, "Appending key %u in ntdb: %s", i, ntdb_errorstr(ecode)); gettimeofday(&stop, NULL); if (transaction && (ecode = ntdb_transaction_commit(ntdb))) errx(1, "committing transaction: %s", ntdb_errorstr(ecode)); printf(" %zu ns (%zu bytes)\n", normalize(&start, &stop, num), file_size()); if (ntdb_check(ntdb, NULL, NULL)) errx(1, "ntdb_check failed!"); if (summary) { char *sumstr = NULL; ntdb_summary(ntdb, NTDB_SUMMARY_HISTOGRAMS, &sumstr); printf("%s\n", sumstr); free(sumstr); } if (++stage == stopat) exit(0); /* Churn 1000 records: not in order! */ if (transaction && (ecode = ntdb_transaction_start(ntdb))) errx(1, "starting transaction: %s", ntdb_errorstr(ecode)); printf("Churning %u records: ", num); fflush(stdout); gettimeofday(&start, NULL); for (j = 0; j < num; j++) { i = (j + 1000019) % num; if ((ecode = ntdb_delete(ntdb, key)) != NTDB_SUCCESS) errx(1, "Deleting key %u in ntdb: %s", i, ntdb_errorstr(ecode)); i += num; if ((ecode = ntdb_store(ntdb, key, data, NTDB_INSERT)) != 0) errx(1, "Inserting key %u in ntdb: %s", i, ntdb_errorstr(ecode)); } gettimeofday(&stop, NULL); if (transaction && (ecode = ntdb_transaction_commit(ntdb))) errx(1, "committing transaction: %s", ntdb_errorstr(ecode)); printf(" %zu ns (%zu bytes)\n", normalize(&start, &stop, num), file_size()); if (ntdb_check(ntdb, NULL, NULL)) errx(1, "ntdb_check failed!"); if (summary) { char *sumstr = NULL; ntdb_summary(ntdb, NTDB_SUMMARY_HISTOGRAMS, &sumstr); printf("%s\n", sumstr); free(sumstr); } if (do_stats) dump_and_clear_stats(&ntdb, flags, &log); if (++stage == stopat) exit(0); return 0; } ntdb-1.0/transaction.c000066400000000000000000001121401224151530700147710ustar00rootroot00000000000000 /* Unix SMB/CIFS 
implementation. trivial database library Copyright (C) Andrew Tridgell 2005 Copyright (C) Rusty Russell 2010 ** NOTE! The following LGPL license applies to the ntdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "private.h" #include #define SAFE_FREE(ntdb, x) do { if ((x) != NULL) {ntdb->free_fn((void *)x, ntdb->alloc_data); (x)=NULL;} } while(0) /* transaction design: - only allow a single transaction at a time per database. This makes using the transaction API simpler, as otherwise the caller would have to cope with temporary failures in transactions that conflict with other current transactions - keep the transaction recovery information in the same file as the database, using a special 'transaction recovery' record pointed at by the header. This removes the need for extra journal files as used by some other databases - dynamically allocated the transaction recover record, re-using it for subsequent transactions. If a larger record is needed then ntdb_free() the old record to place it on the normal ntdb freelist before allocating the new record - during transactions, keep a linked list of writes all that have been performed by intercepting all ntdb_write() calls. The hooked transaction versions of ntdb_read() and ntdb_write() check this linked list and try to use the elements of the list in preference to the real database. 
- don't allow any locks to be held when a transaction starts, otherwise we can end up with deadlock (plus lack of lock nesting in POSIX locks would mean the lock is lost) - if the caller gains a lock during the transaction but doesn't release it then fail the commit - allow for nested calls to ntdb_transaction_start(), re-using the existing transaction record. If the inner transaction is canceled then a subsequent commit will fail - keep a mirrored copy of the ntdb hash chain heads to allow for the fast hash heads scan on traverse, updating the mirrored copy in the transaction version of ntdb_write - allow callers to mix transaction and non-transaction use of ntdb, although once a transaction is started then an exclusive lock is gained until the transaction is committed or canceled - the commit stategy involves first saving away all modified data into a linearised buffer in the transaction recovery area, then marking the transaction recovery area with a magic value to indicate a valid recovery record. In total 4 fsync/msync calls are needed per commit to prevent race conditions. It might be possible to reduce this to 3 or even 2 with some more work. - check for a valid recovery record on open of the ntdb, while the open lock is held. Automatically recover from the transaction recovery area if needed, then continue with the open as usual. This allows for smooth crash recovery with no administrator intervention. - if NTDB_NOSYNC is passed to flags in ntdb_open then transactions are still available, but fsync/msync calls are made. This means we still are safe against unexpected death during transaction commit, but not against machine reboots. */ /* hold the context of any current transaction */ struct ntdb_transaction { /* the original io methods - used to do IOs to the real db */ const struct ntdb_methods *io_methods; /* the list of transaction blocks. 
When a block is first written to, it gets created in this list */ uint8_t **blocks; size_t num_blocks; /* non-zero when an internal transaction error has occurred. All write operations will then fail until the transaction is ended */ int transaction_error; /* when inside a transaction we need to keep track of any nested ntdb_transaction_start() calls, as these are allowed, but don't create a new transaction */ unsigned int nesting; /* set when a prepare has already occurred */ bool prepared; ntdb_off_t magic_offset; /* old file size before transaction */ ntdb_len_t old_map_size; }; /* read while in a transaction. We need to check first if the data is in our list of transaction elements, then if not do a real read */ static enum NTDB_ERROR transaction_read(struct ntdb_context *ntdb, ntdb_off_t off, void *buf, ntdb_len_t len) { size_t blk; enum NTDB_ERROR ecode; /* break it down into block sized ops */ while (len + (off % NTDB_PGSIZE) > NTDB_PGSIZE) { ntdb_len_t len2 = NTDB_PGSIZE - (off % NTDB_PGSIZE); ecode = transaction_read(ntdb, off, buf, len2); if (ecode != NTDB_SUCCESS) { return ecode; } len -= len2; off += len2; buf = (void *)(len2 + (char *)buf); } if (len == 0) { return NTDB_SUCCESS; } blk = off / NTDB_PGSIZE; /* see if we have it in the block list */ if (ntdb->transaction->num_blocks <= blk || ntdb->transaction->blocks[blk] == NULL) { /* nope, do a real read */ ecode = ntdb->transaction->io_methods->tread(ntdb, off, buf, len); if (ecode != NTDB_SUCCESS) { goto fail; } return 0; } /* now copy it out of this block */ memcpy(buf, ntdb->transaction->blocks[blk] + (off % NTDB_PGSIZE), len); return NTDB_SUCCESS; fail: ntdb->transaction->transaction_error = 1; return ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "transaction_read: failed at off=%zu len=%zu", (size_t)off, (size_t)len); } /* write while in a transaction */ static enum NTDB_ERROR transaction_write(struct ntdb_context *ntdb, ntdb_off_t off, const void *buf, ntdb_len_t len) { size_t blk; enum NTDB_ERROR 
ecode; /* Only a commit is allowed on a prepared transaction */ if (ntdb->transaction->prepared) { ecode = ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_ERROR, "transaction_write: transaction already" " prepared, write not allowed"); goto fail; } /* break it up into block sized chunks */ while (len + (off % NTDB_PGSIZE) > NTDB_PGSIZE) { ntdb_len_t len2 = NTDB_PGSIZE - (off % NTDB_PGSIZE); ecode = transaction_write(ntdb, off, buf, len2); if (ecode != NTDB_SUCCESS) { return ecode; } len -= len2; off += len2; if (buf != NULL) { buf = (const void *)(len2 + (const char *)buf); } } if (len == 0) { return NTDB_SUCCESS; } blk = off / NTDB_PGSIZE; off = off % NTDB_PGSIZE; if (ntdb->transaction->num_blocks <= blk) { uint8_t **new_blocks; /* expand the blocks array */ if (ntdb->transaction->blocks == NULL) { new_blocks = (uint8_t **)ntdb->alloc_fn(ntdb, (blk+1)*sizeof(uint8_t *), ntdb->alloc_data); } else { new_blocks = (uint8_t **)ntdb->expand_fn( ntdb->transaction->blocks, (blk+1)*sizeof(uint8_t *), ntdb->alloc_data); } if (new_blocks == NULL) { ecode = ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "transaction_write:" " failed to allocate"); goto fail; } memset(&new_blocks[ntdb->transaction->num_blocks], 0, (1+(blk - ntdb->transaction->num_blocks))*sizeof(uint8_t *)); ntdb->transaction->blocks = new_blocks; ntdb->transaction->num_blocks = blk+1; } /* allocate and fill a block? 
*/ if (ntdb->transaction->blocks[blk] == NULL) { ntdb->transaction->blocks[blk] = (uint8_t *) ntdb->alloc_fn(ntdb->transaction->blocks, NTDB_PGSIZE, ntdb->alloc_data); if (ntdb->transaction->blocks[blk] == NULL) { ecode = ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "transaction_write:" " failed to allocate"); goto fail; } memset(ntdb->transaction->blocks[blk], 0, NTDB_PGSIZE); if (ntdb->transaction->old_map_size > blk * NTDB_PGSIZE) { ntdb_len_t len2 = NTDB_PGSIZE; if (len2 + (blk * NTDB_PGSIZE) > ntdb->transaction->old_map_size) { len2 = ntdb->transaction->old_map_size - (blk * NTDB_PGSIZE); } ecode = ntdb->transaction->io_methods->tread(ntdb, blk * NTDB_PGSIZE, ntdb->transaction->blocks[blk], len2); if (ecode != NTDB_SUCCESS) { ecode = ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "transaction_write:" " failed to" " read old block: %s", strerror(errno)); SAFE_FREE(ntdb, ntdb->transaction->blocks[blk]); goto fail; } } } /* overwrite part of an existing block */ if (buf == NULL) { memset(ntdb->transaction->blocks[blk] + off, 0, len); } else { memcpy(ntdb->transaction->blocks[blk] + off, buf, len); } return NTDB_SUCCESS; fail: ntdb->transaction->transaction_error = 1; return ecode; } /* write while in a transaction - this variant never expands the transaction blocks, it only updates existing blocks. 
This means it cannot change the recovery size */ static void transaction_write_existing(struct ntdb_context *ntdb, ntdb_off_t off, const void *buf, ntdb_len_t len) { size_t blk; /* break it up into block sized chunks */ while (len + (off % NTDB_PGSIZE) > NTDB_PGSIZE) { ntdb_len_t len2 = NTDB_PGSIZE - (off % NTDB_PGSIZE); transaction_write_existing(ntdb, off, buf, len2); len -= len2; off += len2; if (buf != NULL) { buf = (const void *)(len2 + (const char *)buf); } } if (len == 0) { return; } blk = off / NTDB_PGSIZE; off = off % NTDB_PGSIZE; if (ntdb->transaction->num_blocks <= blk || ntdb->transaction->blocks[blk] == NULL) { return; } /* overwrite part of an existing block */ memcpy(ntdb->transaction->blocks[blk] + off, buf, len); } /* out of bounds check during a transaction */ static enum NTDB_ERROR transaction_oob(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_len_t len, bool probe) { if ((off + len >= off && off + len <= ntdb->file->map_size) || probe) { return NTDB_SUCCESS; } ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_oob len %lld beyond transaction size %lld", (long long)(off + len), (long long)ntdb->file->map_size); return NTDB_ERR_IO; } /* transaction version of ntdb_expand(). */ static enum NTDB_ERROR transaction_expand_file(struct ntdb_context *ntdb, ntdb_off_t addition) { enum NTDB_ERROR ecode; assert((ntdb->file->map_size + addition) % NTDB_PGSIZE == 0); /* add a write to the transaction elements, so subsequent reads see the zero data */ ecode = transaction_write(ntdb, ntdb->file->map_size, NULL, addition); if (ecode == NTDB_SUCCESS) { ntdb->file->map_size += addition; } return ecode; } static void *transaction_direct(struct ntdb_context *ntdb, ntdb_off_t off, size_t len, bool write_mode) { size_t blk = off / NTDB_PGSIZE, end_blk; /* This is wrong for zero-length blocks, but will fail gracefully */ end_blk = (off + len - 1) / NTDB_PGSIZE; /* Can only do direct if in single block and we've already copied. 
*/ if (write_mode) { ntdb->stats.transaction_write_direct++; if (blk != end_blk || blk >= ntdb->transaction->num_blocks || ntdb->transaction->blocks[blk] == NULL) { ntdb->stats.transaction_write_direct_fail++; return NULL; } return ntdb->transaction->blocks[blk] + off % NTDB_PGSIZE; } ntdb->stats.transaction_read_direct++; /* Single which we have copied? */ if (blk == end_blk && blk < ntdb->transaction->num_blocks && ntdb->transaction->blocks[blk]) return ntdb->transaction->blocks[blk] + off % NTDB_PGSIZE; /* Otherwise must be all not copied. */ while (blk <= end_blk) { if (blk >= ntdb->transaction->num_blocks) break; if (ntdb->transaction->blocks[blk]) { ntdb->stats.transaction_read_direct_fail++; return NULL; } blk++; } return ntdb->transaction->io_methods->direct(ntdb, off, len, false); } static ntdb_off_t transaction_read_off(struct ntdb_context *ntdb, ntdb_off_t off) { ntdb_off_t ret; enum NTDB_ERROR ecode; ecode = transaction_read(ntdb, off, &ret, sizeof(ret)); ntdb_convert(ntdb, &ret, sizeof(ret)); if (ecode != NTDB_SUCCESS) { return NTDB_ERR_TO_OFF(ecode); } return ret; } static enum NTDB_ERROR transaction_write_off(struct ntdb_context *ntdb, ntdb_off_t off, ntdb_off_t val) { ntdb_convert(ntdb, &val, sizeof(val)); return transaction_write(ntdb, off, &val, sizeof(val)); } static const struct ntdb_methods transaction_methods = { transaction_read, transaction_write, transaction_oob, transaction_expand_file, transaction_direct, transaction_read_off, transaction_write_off, }; /* sync to disk */ static enum NTDB_ERROR transaction_sync(struct ntdb_context *ntdb, ntdb_off_t offset, ntdb_len_t length) { if (ntdb->flags & NTDB_NOSYNC) { return NTDB_SUCCESS; } if (fsync(ntdb->file->fd) != 0) { return ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_transaction: fsync failed: %s", strerror(errno)); } #ifdef MS_SYNC if (ntdb->file->map_ptr) { ntdb_off_t moffset = offset & ~(getpagesize()-1); if (msync(moffset + (char *)ntdb->file->map_ptr, length + (offset - 
moffset), MS_SYNC) != 0) { return ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR, "ntdb_transaction: msync failed: %s", strerror(errno)); } } #endif return NTDB_SUCCESS; } static void free_transaction_blocks(struct ntdb_context *ntdb) { int i; /* free all the transaction blocks */ for (i=0;itransaction->num_blocks;i++) { if (ntdb->transaction->blocks[i] != NULL) { ntdb->free_fn(ntdb->transaction->blocks[i], ntdb->alloc_data); } } SAFE_FREE(ntdb, ntdb->transaction->blocks); ntdb->transaction->num_blocks = 0; } static void _ntdb_transaction_cancel(struct ntdb_context *ntdb) { enum NTDB_ERROR ecode; if (ntdb->transaction == NULL) { ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_transaction_cancel: no transaction"); return; } if (ntdb->transaction->nesting != 0) { ntdb->transaction->transaction_error = 1; ntdb->transaction->nesting--; return; } ntdb->file->map_size = ntdb->transaction->old_map_size; free_transaction_blocks(ntdb); if (ntdb->transaction->magic_offset) { const struct ntdb_methods *methods = ntdb->transaction->io_methods; uint64_t invalid = NTDB_RECOVERY_INVALID_MAGIC; /* remove the recovery marker */ ecode = methods->twrite(ntdb, ntdb->transaction->magic_offset, &invalid, sizeof(invalid)); if (ecode == NTDB_SUCCESS) ecode = transaction_sync(ntdb, ntdb->transaction->magic_offset, sizeof(invalid)); if (ecode != NTDB_SUCCESS) { ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_transaction_cancel: failed to remove" " recovery magic"); } } if (ntdb->file->allrecord_lock.count) ntdb_allrecord_unlock(ntdb, ntdb->file->allrecord_lock.ltype); /* restore the normal io methods */ ntdb->io = ntdb->transaction->io_methods; ntdb_transaction_unlock(ntdb, F_WRLCK); if (ntdb_has_open_lock(ntdb)) ntdb_unlock_open(ntdb, F_WRLCK); SAFE_FREE(ntdb, ntdb->transaction); } /* start a ntdb transaction. 
No token is returned, as only a single transaction is allowed to be pending per ntdb_context */ _PUBLIC_ enum NTDB_ERROR ntdb_transaction_start(struct ntdb_context *ntdb) { enum NTDB_ERROR ecode; ntdb->stats.transactions++; /* some sanity checks */ if (ntdb->flags & NTDB_INTERNAL) { return ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_transaction_start:" " cannot start a transaction on an" " internal ntdb"); } if (ntdb->flags & NTDB_RDONLY) { return ntdb_logerr(ntdb, NTDB_ERR_RDONLY, NTDB_LOG_USE_ERROR, "ntdb_transaction_start:" " cannot start a transaction on a" " read-only ntdb"); } /* cope with nested ntdb_transaction_start() calls */ if (ntdb->transaction != NULL) { if (!(ntdb->flags & NTDB_ALLOW_NESTING)) { return ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_USE_ERROR, "ntdb_transaction_start:" " already inside transaction"); } ntdb->transaction->nesting++; ntdb->stats.transaction_nest++; return 0; } if (ntdb_has_hash_locks(ntdb)) { /* the caller must not have any locks when starting a transaction as otherwise we'll be screwed by lack of nested locks in POSIX */ return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_USE_ERROR, "ntdb_transaction_start:" " cannot start a transaction with locks" " held"); } ntdb->transaction = (struct ntdb_transaction *) ntdb->alloc_fn(ntdb, sizeof(struct ntdb_transaction), ntdb->alloc_data); if (ntdb->transaction == NULL) { return ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "ntdb_transaction_start:" " cannot allocate"); } memset(ntdb->transaction, 0, sizeof(*ntdb->transaction)); /* get the transaction write lock. This is a blocking lock. As discussed with Volker, there are a number of ways we could make this async, which we will probably do in the future */ ecode = ntdb_transaction_lock(ntdb, F_WRLCK); if (ecode != NTDB_SUCCESS) { SAFE_FREE(ntdb, ntdb->transaction->blocks); SAFE_FREE(ntdb, ntdb->transaction); return ecode; } /* get a read lock over entire file. 
This is upgraded to a write lock during the commit */ ecode = ntdb_allrecord_lock(ntdb, F_RDLCK, NTDB_LOCK_WAIT, true); if (ecode != NTDB_SUCCESS) { goto fail_allrecord_lock; } /* make sure we know about any file expansions already done by anyone else */ ntdb_oob(ntdb, ntdb->file->map_size, 1, true); ntdb->transaction->old_map_size = ntdb->file->map_size; /* finally hook the io methods, replacing them with transaction specific methods */ ntdb->transaction->io_methods = ntdb->io; ntdb->io = &transaction_methods; return NTDB_SUCCESS; fail_allrecord_lock: ntdb_transaction_unlock(ntdb, F_WRLCK); SAFE_FREE(ntdb, ntdb->transaction->blocks); SAFE_FREE(ntdb, ntdb->transaction); return ecode; } /* cancel the current transaction */ _PUBLIC_ void ntdb_transaction_cancel(struct ntdb_context *ntdb) { ntdb->stats.transaction_cancel++; _ntdb_transaction_cancel(ntdb); } /* work out how much space the linearised recovery data will consume (worst case) */ static ntdb_len_t ntdb_recovery_size(struct ntdb_context *ntdb) { ntdb_len_t recovery_size = 0; int i; recovery_size = 0; for (i=0;itransaction->num_blocks;i++) { if (i * NTDB_PGSIZE >= ntdb->transaction->old_map_size) { break; } if (ntdb->transaction->blocks[i] == NULL) { continue; } recovery_size += 2*sizeof(ntdb_off_t) + NTDB_PGSIZE; } return recovery_size; } static enum NTDB_ERROR ntdb_recovery_area(struct ntdb_context *ntdb, const struct ntdb_methods *methods, ntdb_off_t *recovery_offset, struct ntdb_recovery_record *rec) { enum NTDB_ERROR ecode; *recovery_offset = ntdb_read_off(ntdb, offsetof(struct ntdb_header, recovery)); if (NTDB_OFF_IS_ERR(*recovery_offset)) { return NTDB_OFF_TO_ERR(*recovery_offset); } if (*recovery_offset == 0) { rec->max_len = 0; return NTDB_SUCCESS; } ecode = methods->tread(ntdb, *recovery_offset, rec, sizeof(*rec)); if (ecode != NTDB_SUCCESS) return ecode; ntdb_convert(ntdb, rec, sizeof(*rec)); /* ignore invalid recovery regions: can happen in crash */ if (rec->magic != NTDB_RECOVERY_MAGIC && 
rec->magic != NTDB_RECOVERY_INVALID_MAGIC) { *recovery_offset = 0; rec->max_len = 0; } return NTDB_SUCCESS; } static unsigned int same(const unsigned char *new, const unsigned char *old, unsigned int length) { unsigned int i; for (i = 0; i < length; i++) { if (new[i] != old[i]) break; } return i; } static unsigned int different(const unsigned char *new, const unsigned char *old, unsigned int length, unsigned int min_same, unsigned int *samelen) { unsigned int i; *samelen = 0; for (i = 0; i < length; i++) { if (new[i] == old[i]) { (*samelen)++; } else { if (*samelen >= min_same) { return i - *samelen; } *samelen = 0; } } if (*samelen < min_same) *samelen = 0; return length - *samelen; } /* Allocates recovery blob, without ntdb_recovery_record at head set up. */ static struct ntdb_recovery_record *alloc_recovery(struct ntdb_context *ntdb, ntdb_len_t *len) { struct ntdb_recovery_record *rec; size_t i; enum NTDB_ERROR ecode; unsigned char *p; const struct ntdb_methods *old_methods = ntdb->io; rec = ntdb->alloc_fn(ntdb, sizeof(*rec) + ntdb_recovery_size(ntdb), ntdb->alloc_data); if (!rec) { ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "transaction_setup_recovery:" " cannot allocate"); return NTDB_ERR_PTR(NTDB_ERR_OOM); } /* We temporarily revert to the old I/O methods, so we can use * ntdb_access_read */ ntdb->io = ntdb->transaction->io_methods; /* build the recovery data into a single blob to allow us to do a single large write, which should be more efficient */ p = (unsigned char *)(rec + 1); for (i=0;itransaction->num_blocks;i++) { ntdb_off_t offset; ntdb_len_t length; unsigned int off; const unsigned char *buffer; if (ntdb->transaction->blocks[i] == NULL) { continue; } offset = i * NTDB_PGSIZE; length = NTDB_PGSIZE; if (offset >= ntdb->transaction->old_map_size) { continue; } if (offset + length > ntdb->file->map_size) { ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_transaction_setup_recovery:" " transaction data over new region" " boundary"); 
goto fail; } buffer = ntdb_access_read(ntdb, offset, length, false); if (NTDB_PTR_IS_ERR(buffer)) { ecode = NTDB_PTR_ERR(buffer); goto fail; } /* Skip over anything the same at the start. */ off = same(ntdb->transaction->blocks[i], buffer, length); offset += off; while (off < length) { ntdb_len_t len1; unsigned int samelen; len1 = different(ntdb->transaction->blocks[i] + off, buffer + off, length - off, sizeof(offset) + sizeof(len1) + 1, &samelen); memcpy(p, &offset, sizeof(offset)); memcpy(p + sizeof(offset), &len1, sizeof(len1)); ntdb_convert(ntdb, p, sizeof(offset) + sizeof(len1)); p += sizeof(offset) + sizeof(len1); memcpy(p, buffer + off, len1); p += len1; off += len1 + samelen; offset += len1 + samelen; } ntdb_access_release(ntdb, buffer); } *len = p - (unsigned char *)(rec + 1); ntdb->io = old_methods; return rec; fail: ntdb->free_fn(rec, ntdb->alloc_data); ntdb->io = old_methods; return NTDB_ERR_PTR(ecode); } static ntdb_off_t create_recovery_area(struct ntdb_context *ntdb, ntdb_len_t rec_length, struct ntdb_recovery_record *rec) { ntdb_off_t off, recovery_off; ntdb_len_t addition; enum NTDB_ERROR ecode; const struct ntdb_methods *methods = ntdb->transaction->io_methods; /* round up to a multiple of page size. Overallocate, since each * such allocation forces us to expand the file. */ rec->max_len = ntdb_expand_adjust(ntdb->file->map_size, rec_length); /* Round up to a page. */ rec->max_len = ((sizeof(*rec) + rec->max_len + NTDB_PGSIZE-1) & ~(NTDB_PGSIZE-1)) - sizeof(*rec); off = ntdb->file->map_size; /* Restore ->map_size before calling underlying expand_file. 
Also so that we don't try to expand the file again in the transaction commit, which would destroy the recovery area */ addition = (ntdb->file->map_size - ntdb->transaction->old_map_size) + sizeof(*rec) + rec->max_len; ntdb->file->map_size = ntdb->transaction->old_map_size; ntdb->stats.transaction_expand_file++; ecode = methods->expand_file(ntdb, addition); if (ecode != NTDB_SUCCESS) { ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_recovery_allocate:" " failed to create recovery area"); return NTDB_ERR_TO_OFF(ecode); } /* we have to reset the old map size so that we don't try to expand the file again in the transaction commit, which would destroy the recovery area */ ntdb->transaction->old_map_size = ntdb->file->map_size; /* write the recovery header offset and sync - we can sync without a race here as the magic ptr in the recovery record has not been set */ recovery_off = off; ntdb_convert(ntdb, &recovery_off, sizeof(recovery_off)); ecode = methods->twrite(ntdb, offsetof(struct ntdb_header, recovery), &recovery_off, sizeof(ntdb_off_t)); if (ecode != NTDB_SUCCESS) { ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_recovery_allocate:" " failed to write recovery head"); return NTDB_ERR_TO_OFF(ecode); } transaction_write_existing(ntdb, offsetof(struct ntdb_header, recovery), &recovery_off, sizeof(ntdb_off_t)); return off; } /* setup the recovery data that will be used on a crash during commit */ static enum NTDB_ERROR transaction_setup_recovery(struct ntdb_context *ntdb) { ntdb_len_t recovery_size = 0; ntdb_off_t recovery_off = 0; ntdb_off_t old_map_size = ntdb->transaction->old_map_size; struct ntdb_recovery_record *recovery; const struct ntdb_methods *methods = ntdb->transaction->io_methods; uint64_t magic; enum NTDB_ERROR ecode; recovery = alloc_recovery(ntdb, &recovery_size); if (NTDB_PTR_IS_ERR(recovery)) return NTDB_PTR_ERR(recovery); /* If we didn't actually change anything we overwrote? */ if (recovery_size == 0) { /* In theory, we could have just appended data. 
*/ if (ntdb->transaction->num_blocks * NTDB_PGSIZE < ntdb->transaction->old_map_size) { free_transaction_blocks(ntdb); } ntdb->free_fn(recovery, ntdb->alloc_data); return NTDB_SUCCESS; } ecode = ntdb_recovery_area(ntdb, methods, &recovery_off, recovery); if (ecode) { ntdb->free_fn(recovery, ntdb->alloc_data); return ecode; } if (recovery->max_len < recovery_size) { /* Not large enough. Free up old recovery area. */ if (recovery_off) { ntdb->stats.frees++; ecode = add_free_record(ntdb, recovery_off, sizeof(*recovery) + recovery->max_len, NTDB_LOCK_WAIT, true); ntdb->free_fn(recovery, ntdb->alloc_data); if (ecode != NTDB_SUCCESS) { return ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_recovery_allocate:" " failed to free previous" " recovery area"); } /* Refresh recovery after add_free_record above. */ recovery = alloc_recovery(ntdb, &recovery_size); if (NTDB_PTR_IS_ERR(recovery)) return NTDB_PTR_ERR(recovery); } recovery_off = create_recovery_area(ntdb, recovery_size, recovery); if (NTDB_OFF_IS_ERR(recovery_off)) { ntdb->free_fn(recovery, ntdb->alloc_data); return NTDB_OFF_TO_ERR(recovery_off); } } /* Now we know size, convert rec header. 
*/ recovery->magic = NTDB_RECOVERY_INVALID_MAGIC; recovery->len = recovery_size; recovery->eof = old_map_size; ntdb_convert(ntdb, recovery, sizeof(*recovery)); /* write the recovery data to the recovery area */ ecode = methods->twrite(ntdb, recovery_off, recovery, sizeof(*recovery) + recovery_size); if (ecode != NTDB_SUCCESS) { ntdb->free_fn(recovery, ntdb->alloc_data); return ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_transaction_setup_recovery:" " failed to write recovery data"); } transaction_write_existing(ntdb, recovery_off, recovery, recovery_size); ntdb->free_fn(recovery, ntdb->alloc_data); /* as we don't have ordered writes, we have to sync the recovery data before we update the magic to indicate that the recovery data is present */ ecode = transaction_sync(ntdb, recovery_off, recovery_size); if (ecode != NTDB_SUCCESS) return ecode; magic = NTDB_RECOVERY_MAGIC; ntdb_convert(ntdb, &magic, sizeof(magic)); ntdb->transaction->magic_offset = recovery_off + offsetof(struct ntdb_recovery_record, magic); ecode = methods->twrite(ntdb, ntdb->transaction->magic_offset, &magic, sizeof(magic)); if (ecode != NTDB_SUCCESS) { return ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_transaction_setup_recovery:" " failed to write recovery magic"); } transaction_write_existing(ntdb, ntdb->transaction->magic_offset, &magic, sizeof(magic)); /* ensure the recovery magic marker is on disk */ return transaction_sync(ntdb, ntdb->transaction->magic_offset, sizeof(magic)); } static enum NTDB_ERROR _ntdb_transaction_prepare_commit(struct ntdb_context *ntdb) { const struct ntdb_methods *methods; enum NTDB_ERROR ecode; if (ntdb->transaction == NULL) { return ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_transaction_prepare_commit:" " no transaction"); } if (ntdb->transaction->prepared) { _ntdb_transaction_cancel(ntdb); return ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_transaction_prepare_commit:" " transaction already prepared"); } if 
(ntdb->transaction->transaction_error) { _ntdb_transaction_cancel(ntdb); return ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_ERROR, "ntdb_transaction_prepare_commit:" " transaction error pending"); } if (ntdb->transaction->nesting != 0) { return NTDB_SUCCESS; } /* check for a null transaction */ if (ntdb->transaction->blocks == NULL) { return NTDB_SUCCESS; } methods = ntdb->transaction->io_methods; /* upgrade the main transaction lock region to a write lock */ ecode = ntdb_allrecord_upgrade(ntdb, NTDB_HASH_LOCK_START); if (ecode != NTDB_SUCCESS) { return ecode; } /* get the open lock - this prevents new users attaching to the database during the commit */ ecode = ntdb_lock_open(ntdb, F_WRLCK, NTDB_LOCK_WAIT|NTDB_LOCK_NOCHECK); if (ecode != NTDB_SUCCESS) { return ecode; } /* Sets up ntdb->transaction->recovery and * ntdb->transaction->magic_offset. */ ecode = transaction_setup_recovery(ntdb); if (ecode != NTDB_SUCCESS) { return ecode; } ntdb->transaction->prepared = true; /* expand the file to the new size if needed */ if (ntdb->file->map_size != ntdb->transaction->old_map_size) { ntdb_len_t add; add = ntdb->file->map_size - ntdb->transaction->old_map_size; /* Restore original map size for ntdb_expand_file */ ntdb->file->map_size = ntdb->transaction->old_map_size; ecode = methods->expand_file(ntdb, add); if (ecode != NTDB_SUCCESS) { return ecode; } } /* Keep the open lock until the actual commit */ return NTDB_SUCCESS; } /* prepare to commit the current transaction */ _PUBLIC_ enum NTDB_ERROR ntdb_transaction_prepare_commit(struct ntdb_context *ntdb) { return _ntdb_transaction_prepare_commit(ntdb); } /* commit the current transaction */ _PUBLIC_ enum NTDB_ERROR ntdb_transaction_commit(struct ntdb_context *ntdb) { const struct ntdb_methods *methods; int i; enum NTDB_ERROR ecode; if (ntdb->transaction == NULL) { return ntdb_logerr(ntdb, NTDB_ERR_EINVAL, NTDB_LOG_USE_ERROR, "ntdb_transaction_commit:" " no transaction"); } ntdb_trace(ntdb, "ntdb_transaction_commit"); if 
(ntdb->transaction->nesting != 0) { ntdb->transaction->nesting--; return NTDB_SUCCESS; } if (!ntdb->transaction->prepared) { ecode = _ntdb_transaction_prepare_commit(ntdb); if (ecode != NTDB_SUCCESS) { _ntdb_transaction_cancel(ntdb); return ecode; } } /* check for a null transaction (prepare_commit may do this!) */ if (ntdb->transaction->blocks == NULL) { _ntdb_transaction_cancel(ntdb); return NTDB_SUCCESS; } methods = ntdb->transaction->io_methods; /* perform all the writes */ for (i=0;itransaction->num_blocks;i++) { ntdb_off_t offset; ntdb_len_t length; if (ntdb->transaction->blocks[i] == NULL) { continue; } offset = i * NTDB_PGSIZE; length = NTDB_PGSIZE; ecode = methods->twrite(ntdb, offset, ntdb->transaction->blocks[i], length); if (ecode != NTDB_SUCCESS) { /* we've overwritten part of the data and possibly expanded the file, so we need to run the crash recovery code */ ntdb->io = methods; ntdb_transaction_recover(ntdb); _ntdb_transaction_cancel(ntdb); return ecode; } SAFE_FREE(ntdb, ntdb->transaction->blocks[i]); } SAFE_FREE(ntdb, ntdb->transaction->blocks); ntdb->transaction->num_blocks = 0; /* ensure the new data is on disk */ ecode = transaction_sync(ntdb, 0, ntdb->file->map_size); if (ecode != NTDB_SUCCESS) { return ecode; } /* TODO: maybe write to some dummy hdr field, or write to magic offset without mmap, before the last sync, instead of the utime() call */ /* on some systems (like Linux 2.6.x) changes via mmap/msync don't change the mtime of the file, this means the file may not be backed up (as ntdb rounding to block sizes means that file size changes are quite rare too). The following forces mtime changes when a transaction completes */ #if HAVE_UTIME utime(ntdb->name, NULL); #endif /* use a transaction cancel to free memory and remove the transaction locks: it "restores" map_size, too. */ ntdb->transaction->old_map_size = ntdb->file->map_size; _ntdb_transaction_cancel(ntdb); return NTDB_SUCCESS; } /* recover from an aborted transaction. 
Must be called with exclusive database write access already established (including the open lock to prevent new processes attaching) */ enum NTDB_ERROR ntdb_transaction_recover(struct ntdb_context *ntdb) { ntdb_off_t recovery_head, recovery_eof; unsigned char *data, *p; struct ntdb_recovery_record rec; enum NTDB_ERROR ecode; /* find the recovery area */ recovery_head = ntdb_read_off(ntdb, offsetof(struct ntdb_header,recovery)); if (NTDB_OFF_IS_ERR(recovery_head)) { ecode = NTDB_OFF_TO_ERR(recovery_head); return ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_transaction_recover:" " failed to read recovery head"); } if (recovery_head == 0) { /* we have never allocated a recovery record */ return NTDB_SUCCESS; } /* read the recovery record */ ecode = ntdb_read_convert(ntdb, recovery_head, &rec, sizeof(rec)); if (ecode != NTDB_SUCCESS) { return ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_transaction_recover:" " failed to read recovery record"); } if (rec.magic != NTDB_RECOVERY_MAGIC) { /* there is no valid recovery data */ return NTDB_SUCCESS; } if (ntdb->flags & NTDB_RDONLY) { return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR, "ntdb_transaction_recover:" " attempt to recover read only database"); } recovery_eof = rec.eof; data = (unsigned char *)ntdb->alloc_fn(ntdb, rec.len, ntdb->alloc_data); if (data == NULL) { return ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR, "ntdb_transaction_recover:" " failed to allocate recovery data"); } /* read the full recovery data */ ecode = ntdb->io->tread(ntdb, recovery_head + sizeof(rec), data, rec.len); if (ecode != NTDB_SUCCESS) { return ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_transaction_recover:" " failed to read recovery data"); } /* recover the file data */ p = data; while (p+sizeof(ntdb_off_t)+sizeof(ntdb_len_t) < data + rec.len) { ntdb_off_t ofs; ntdb_len_t len; ntdb_convert(ntdb, p, sizeof(ofs) + sizeof(len)); memcpy(&ofs, p, sizeof(ofs)); memcpy(&len, p + sizeof(ofs), sizeof(len)); p += sizeof(ofs) + 
sizeof(len); ecode = ntdb->io->twrite(ntdb, ofs, p, len); if (ecode != NTDB_SUCCESS) { ntdb->free_fn(data, ntdb->alloc_data); return ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_transaction_recover:" " failed to recover %zu bytes" " at offset %zu", (size_t)len, (size_t)ofs); } p += len; } ntdb->free_fn(data, ntdb->alloc_data); ecode = transaction_sync(ntdb, 0, ntdb->file->map_size); if (ecode != NTDB_SUCCESS) { return ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_transaction_recover:" " failed to sync recovery"); } /* if the recovery area is after the recovered eof then remove it */ if (recovery_eof <= recovery_head) { ecode = ntdb_write_off(ntdb, offsetof(struct ntdb_header, recovery), 0); if (ecode != NTDB_SUCCESS) { return ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_transaction_recover:" " failed to remove recovery head"); } } /* remove the recovery magic */ ecode = ntdb_write_off(ntdb, recovery_head + offsetof(struct ntdb_recovery_record, magic), NTDB_RECOVERY_INVALID_MAGIC); if (ecode != NTDB_SUCCESS) { return ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_transaction_recover:" " failed to remove recovery magic"); } ecode = transaction_sync(ntdb, 0, recovery_eof); if (ecode != NTDB_SUCCESS) { return ntdb_logerr(ntdb, ecode, NTDB_LOG_ERROR, "ntdb_transaction_recover:" " failed to sync2 recovery"); } ntdb_logerr(ntdb, NTDB_SUCCESS, NTDB_LOG_WARNING, "ntdb_transaction_recover: recovered %zu byte database", (size_t)recovery_eof); /* all done */ return NTDB_SUCCESS; } ntdb_bool_err ntdb_needs_recovery(struct ntdb_context *ntdb) { ntdb_off_t recovery_head; struct ntdb_recovery_record rec; enum NTDB_ERROR ecode; /* find the recovery area */ recovery_head = ntdb_read_off(ntdb, offsetof(struct ntdb_header,recovery)); if (NTDB_OFF_IS_ERR(recovery_head)) { return recovery_head; } if (recovery_head == 0) { /* we have never allocated a recovery record */ return false; } /* read the recovery record */ ecode = ntdb_read_convert(ntdb, recovery_head, &rec, sizeof(rec)); 
if (ecode != NTDB_SUCCESS) { return NTDB_ERR_TO_OFF(ecode); } return (rec.magic == NTDB_RECOVERY_MAGIC); } ntdb-1.0/traverse.c000066400000000000000000000052771224151530700143130ustar00rootroot00000000000000 /* Trivial Database 2: traverse function. Copyright (C) Rusty Russell 2010 This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "private.h" #include _PUBLIC_ int64_t ntdb_traverse_(struct ntdb_context *ntdb, int (*fn)(struct ntdb_context *, NTDB_DATA, NTDB_DATA, void *), void *p) { enum NTDB_ERROR ecode; struct hash_info h; NTDB_DATA k, d; int64_t count = 0; k.dptr = NULL; for (ecode = first_in_hash(ntdb, &h, &k, &d.dsize); ecode == NTDB_SUCCESS; ecode = next_in_hash(ntdb, &h, &k, &d.dsize)) { d.dptr = k.dptr + k.dsize; count++; if (fn && fn(ntdb, k, d, p)) { ntdb->free_fn(k.dptr, ntdb->alloc_data); return count; } ntdb->free_fn(k.dptr, ntdb->alloc_data); } if (ecode != NTDB_ERR_NOEXIST) { return NTDB_ERR_TO_OFF(ecode); } return count; } _PUBLIC_ enum NTDB_ERROR ntdb_firstkey(struct ntdb_context *ntdb, NTDB_DATA *key) { struct hash_info h; return first_in_hash(ntdb, &h, key, NULL); } /* We lock twice, not very efficient. We could keep last key & h cached. 
*/ _PUBLIC_ enum NTDB_ERROR ntdb_nextkey(struct ntdb_context *ntdb, NTDB_DATA *key) { struct hash_info h; struct ntdb_used_record rec; ntdb_off_t off; off = find_and_lock(ntdb, *key, F_RDLCK, &h, &rec, NULL); ntdb->free_fn(key->dptr, ntdb->alloc_data); if (NTDB_OFF_IS_ERR(off)) { return NTDB_OFF_TO_ERR(off); } ntdb_unlock_hash(ntdb, h.h, F_RDLCK); /* If we found something, skip to next. */ if (off) h.bucket++; return next_in_hash(ntdb, &h, key, NULL); } static int wipe_one(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA data, enum NTDB_ERROR *ecode) { *ecode = ntdb_delete(ntdb, key); return (*ecode != NTDB_SUCCESS); } _PUBLIC_ enum NTDB_ERROR ntdb_wipe_all(struct ntdb_context *ntdb) { enum NTDB_ERROR ecode; int64_t count; ecode = ntdb_allrecord_lock(ntdb, F_WRLCK, NTDB_LOCK_WAIT, false); if (ecode != NTDB_SUCCESS) return ecode; /* FIXME: Be smarter. */ count = ntdb_traverse(ntdb, wipe_one, &ecode); if (count < 0) ecode = NTDB_OFF_TO_ERR(count); ntdb_allrecord_unlock(ntdb, F_WRLCK); return ecode; } ntdb-1.0/wscript000066400000000000000000000272021224151530700137220ustar00rootroot00000000000000#!/usr/bin/env python APPNAME = 'ntdb' VERSION = '1.0' blddir = 'bin' import sys, os # find the buildtools directory srcdir = '.' while not os.path.exists(srcdir+'/buildtools') and len(srcdir.split('/')) < 5: srcdir = '../' + srcdir sys.path.insert(0, srcdir + '/buildtools/wafsamba') import wafsamba, samba_dist, Options, Logs, glob samba_dist.DIST_DIRS('lib/ntdb:. 
lib/replace:lib/replace lib/ccan:lib/ccan buildtools:buildtools') def set_options(opt): opt.BUILTIN_DEFAULT('replace,ccan') opt.PRIVATE_EXTENSION_DEFAULT('ntdb', noextension='ntdb') opt.RECURSE('lib/replace') opt.add_option('--valgrind', help=("use valgrind on tests programs"), action="store_true", dest='VALGRIND', default=False) opt.add_option('--valgrind-log', help=("where to put the valgrind log"), action="store", dest='VALGRINDLOG', default=None) if opt.IN_LAUNCH_DIR(): opt.add_option('--disable-python', help=("disable the pyntdb module"), action="store_true", dest='disable_python', default=False) def configure(conf): conf.RECURSE('lib/replace') conf.RECURSE('lib/ccan') conf.env.NTDB_TEST_RUN_SRC=['test/run-001-encode.c', 'test/run-001-fls.c', 'test/run-01-new_database.c', 'test/run-02-expand.c', 'test/run-03-coalesce.c', 'test/run-04-basichash.c', 'test/run-05-readonly-open.c', 'test/run-10-simple-store.c', 'test/run-11-simple-fetch.c', 'test/run-12-check.c', 'test/run-15-append.c', 'test/run-25-hashoverload.c', 'test/run-30-exhaust-before-expand.c', 'test/run-35-convert.c', 'test/run-50-multiple-freelists.c', 'test/run-56-open-during-transaction.c', 'test/run-57-die-during-transaction.c', 'test/run-64-bit-tdb.c', 'test/run-90-get-set-attributes.c', 'test/run-capabilities.c', 'test/run-expand-in-transaction.c', 'test/run-features.c', 'test/run-lockall.c', 'test/run-remap-in-read_traverse.c', 'test/run-seed.c', 'test/run-tdb_errorstr.c', 'test/run-tdb_foreach.c', 'test/run-traverse.c'] conf.env.NTDB_TEST_API_SRC=['test/api-12-store.c', 'test/api-13-delete.c', 'test/api-14-exists.c', 'test/api-16-wipe_all.c', 'test/api-20-alloc-attr.c', 'test/api-21-parse_record.c', 'test/api-55-transaction.c', 'test/api-60-noop-transaction.c', 'test/api-80-tdb_fd.c', 'test/api-81-seqnum.c', 'test/api-82-lockattr.c', 'test/api-83-openhook.c', 'test/api-91-get-stats.c', 'test/api-92-get-set-readonly.c', 'test/api-93-repack.c', 'test/api-94-expand-during-parse.c', 
'test/api-95-read-only-during-parse.c', 'test/api-add-remove-flags.c', 'test/api-check-callback.c', 'test/api-firstkey-nextkey.c', 'test/api-fork-test.c', 'test/api-locktimeout.c', 'test/api-missing-entries.c', 'test/api-open-multiple-times.c', 'test/api-record-expand.c', 'test/api-simple-delete.c', 'test/api-summary.c'] conf.env.NTDB_TEST_API_HELPER_SRC=['test/helpapi-external-agent.c'] conf.env.NTDB_TEST_RUN_HELPER_SRC=['test/helprun-external-agent.c', 'test/helprun-layout.c'] conf.env.NTDB_TEST_HELPER_SRC=['test/external-agent.c', 'test/failtest_helper.c', 'test/lock-tracking.c', 'test/logging.c', 'test/tap-interface.c'] conf.env.standalone_ntdb = conf.IN_LAUNCH_DIR() conf.env.disable_python = getattr(Options.options, 'disable_python', False) if not conf.env.standalone_ntdb: if conf.CHECK_BUNDLED_SYSTEM('ntdb', minversion=VERSION, implied_deps='replace'): conf.define('USING_SYSTEM_NTDB', 1) if conf.CHECK_BUNDLED_SYSTEM_PYTHON('pyntdb', 'ntdb', minversion=VERSION): conf.define('USING_SYSTEM_PYNTDB', 1) if not conf.env.disable_python: # also disable if we don't have the python libs installed conf.find_program('python', var='PYTHON') conf.check_tool('python') conf.check_python_version((2,4,2)) conf.SAMBA_CHECK_PYTHON_HEADERS(mandatory=False) if not conf.env.HAVE_PYTHON_H: Logs.warn('Disabling pyntdb as python devel libs not found') conf.env.disable_python = True conf.CHECK_XSLTPROC_MANPAGES() # This make #include work. 
conf.ADD_EXTRA_INCLUDES('''#lib''') conf.SAMBA_CONFIG_H() def build(bld): bld.RECURSE('lib/replace') bld.RECURSE('lib/ccan') if bld.env.standalone_ntdb: bld.env.PKGCONFIGDIR = '${LIBDIR}/pkgconfig' private_library = False else: private_library = True SRC = '''check.c free.c hash.c io.c lock.c open.c summary.c ntdb.c transaction.c traverse.c''' if not bld.CONFIG_SET('USING_SYSTEM_NTDB'): NTDB_CCAN='ccan-likely ccan-ilog ccan-hash ccan-tally' bld.SAMBA_LIBRARY('ntdb', SRC, deps='replace ' + NTDB_CCAN , includes='.', abi_directory='ABI', abi_match='ntdb_*', hide_symbols=True, vnum=VERSION, public_headers='ntdb.h', public_headers_install=not private_library, pc_files='ntdb.pc', private_library=private_library, manpages='man/ntdb.3') bld.SAMBA_BINARY('ntdbtorture', 'tools/ntdbtorture.c', deps='ntdb ccan-err', install=False) bld.SAMBA_BINARY('ntdbtool', 'tools/ntdbtool.c', deps='ntdb', manpages='man/ntdbtool.8') bld.SAMBA_BINARY('ntdbdump', 'tools/ntdbdump.c', deps='ntdb', manpages='man/ntdbdump.8') bld.SAMBA_BINARY('ntdbrestore', 'tools/ntdbrestore.c', deps='ntdb', manpages='man/ntdbrestore.8') bld.SAMBA_BINARY('ntdbbackup', 'tools/ntdbbackup.c', deps='ntdb', manpages='man/ntdbbackup.8') if bld.env.DEVELOPER_MODE: # FIXME: We need CCAN for some API tests, but waf thinks it's # already available via ntdb. It is, but not publicly. # Workaround is to build a private, non-hiding version. 
bld.SAMBA_SUBSYSTEM('ntdb-testing', SRC, deps='replace ' + NTDB_CCAN, includes='.') bld.SAMBA_SUBSYSTEM('ntdb-test-helpers', bld.env.NTDB_TEST_HELPER_SRC, deps='replace') bld.SAMBA_SUBSYSTEM('ntdb-run-helpers', bld.env.NTDB_TEST_RUN_HELPER_SRC, deps='replace') bld.SAMBA_SUBSYSTEM('ntdb-api-helpers', bld.env.NTDB_TEST_API_HELPER_SRC, deps='replace') for f in bld.env.NTDB_TEST_RUN_SRC: base = os.path.splitext(os.path.basename(f))[0] bld.SAMBA_BINARY('ntdb-' + base, f, deps=NTDB_CCAN + ' ccan-failtest ntdb-test-helpers ntdb-run-helpers', install=False) for f in bld.env.NTDB_TEST_API_SRC: base = os.path.splitext(os.path.basename(f))[0] bld.SAMBA_BINARY('ntdb-' + base, f, deps='ntdb-test-helpers ntdb-api-helpers ntdb-testing', install=False) if not bld.CONFIG_SET('USING_SYSTEM_PYNTDB'): bld.SAMBA_PYTHON('pyntdb', source='pyntdb.c', deps='ntdb', enabled=not bld.env.disable_python, realname='ntdb.so', cflags='-DPACKAGE_VERSION=\"%s\"' % VERSION) def testonly(ctx): '''run ntdb testsuite''' import Utils, samba_utils, shutil ecode = 0; env = samba_utils.LOAD_ENVIRONMENT() if env.standalone_ntdb: # FIXME: This is horrible :( test_prefix = "%s/st" % (Utils.g_module.blddir) shutil.rmtree(test_prefix, ignore_errors=True) os.makedirs(test_prefix) # Create scratch directory for tests. 
testdir = os.path.join(test_prefix, 'ntdb-tests') samba_utils.mkdir_p(testdir) # Symlink back to source dir so it can find tests in test/ link = os.path.join(testdir, 'test') if not os.path.exists(link): os.symlink(os.path.abspath(os.path.join(env.cwd, 'test')), link) if env.options['VALGRIND']: os.environ['VALGRIND'] = 'valgrind -q --num-callers=30 --error-exitcode=11' if env.options['VALGRINDLOG']: os.environ['VALGRIND'] += ' --log-file=%s' % Options.options.VALGRINDLOG for f in env.NTDB_TEST_RUN_SRC + env.NTDB_TEST_API_SRC: name = "ntdb-" + os.path.splitext(os.path.basename(f))[0] cmd = "cd " + testdir + " && $VALGRIND " + os.path.abspath(os.path.join(Utils.g_module.blddir, name)) + " > test-output 2>&1" print("..." + f) ret = samba_utils.RUN_COMMAND(cmd) if ret != 0: print("%s (%s) failed:" % (name, f)) samba_utils.RUN_COMMAND("cat " + os.path.join(testdir, 'test-output')) ecode = ret; break; sys.exit(ecode) # WAF doesn't build the unit tests for this, maybe because they don't link with ntdb? # This forces it def test(ctx): import Scripting Scripting.commands.append('build') Scripting.commands.append('testonly') def dist(): '''makes a tarball for distribution''' samba_dist.dist() def reconfigure(ctx): '''reconfigure if config scripts have changed''' import samba_utils samba_utils.reconfigure(ctx)