pax_global_header 0000666 0000000 0000000 00000000064 12241515307 0014512 g ustar 00root root 0000000 0000000 52 comment=14d20ec9b386aaf657cdf394865edbfbbe3fa850
ntdb-1.0/ 0000775 0000000 0000000 00000000000 12241515307 0012301 5 ustar 00root root 0000000 0000000 ntdb-1.0/ABI/ 0000775 0000000 0000000 00000000000 12241515307 0012674 5 ustar 00root root 0000000 0000000 ntdb-1.0/ABI/ntdb-0.9.sigs 0000664 0000000 0000000 00000005015 12241515307 0015017 0 ustar 00root root 0000000 0000000 ntdb_add_flag: void (struct ntdb_context *, unsigned int)
ntdb_append: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA, NTDB_DATA)
ntdb_chainlock: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA)
ntdb_chainlock_read: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA)
ntdb_chainunlock: void (struct ntdb_context *, NTDB_DATA)
ntdb_chainunlock_read: void (struct ntdb_context *, NTDB_DATA)
ntdb_check_: enum NTDB_ERROR (struct ntdb_context *, enum NTDB_ERROR (*)(NTDB_DATA, NTDB_DATA, void *), void *)
ntdb_close: int (struct ntdb_context *)
ntdb_delete: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA)
ntdb_errorstr: const char *(enum NTDB_ERROR)
ntdb_exists: bool (struct ntdb_context *, NTDB_DATA)
ntdb_fd: int (const struct ntdb_context *)
ntdb_fetch: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA, NTDB_DATA *)
ntdb_firstkey: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA *)
ntdb_foreach_: void (int (*)(struct ntdb_context *, void *), void *)
ntdb_get_attribute: enum NTDB_ERROR (struct ntdb_context *, union ntdb_attribute *)
ntdb_get_flags: unsigned int (struct ntdb_context *)
ntdb_get_seqnum: int64_t (struct ntdb_context *)
ntdb_lockall: enum NTDB_ERROR (struct ntdb_context *)
ntdb_lockall_read: enum NTDB_ERROR (struct ntdb_context *)
ntdb_name: const char *(const struct ntdb_context *)
ntdb_nextkey: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA *)
ntdb_open: struct ntdb_context *(const char *, int, int, mode_t, union ntdb_attribute *)
ntdb_parse_record_: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA, enum NTDB_ERROR (*)(NTDB_DATA, NTDB_DATA, void *), void *)
ntdb_remove_flag: void (struct ntdb_context *, unsigned int)
ntdb_repack: enum NTDB_ERROR (struct ntdb_context *)
ntdb_set_attribute: enum NTDB_ERROR (struct ntdb_context *, const union ntdb_attribute *)
ntdb_store: enum NTDB_ERROR (struct ntdb_context *, NTDB_DATA, NTDB_DATA, int)
ntdb_summary: enum NTDB_ERROR (struct ntdb_context *, enum ntdb_summary_flags, char **)
ntdb_transaction_cancel: void (struct ntdb_context *)
ntdb_transaction_commit: enum NTDB_ERROR (struct ntdb_context *)
ntdb_transaction_prepare_commit: enum NTDB_ERROR (struct ntdb_context *)
ntdb_transaction_start: enum NTDB_ERROR (struct ntdb_context *)
ntdb_traverse_: int64_t (struct ntdb_context *, int (*)(struct ntdb_context *, NTDB_DATA, NTDB_DATA, void *), void *)
ntdb_unlockall: void (struct ntdb_context *)
ntdb_unlockall_read: void (struct ntdb_context *)
ntdb_unset_attribute: void (struct ntdb_context *, enum ntdb_attribute_type)
ntdb_wipe_all: enum NTDB_ERROR (struct ntdb_context *)
ntdb-1.0/LICENSE 0000664 0000000 0000000 00000016725 12241515307 0013321 0 ustar 00root root 0000000 0000000 GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
ntdb-1.0/Makefile 0000664 0000000 0000000 00000001563 12241515307 0013746 0 ustar 00root root 0000000 0000000 # simple makefile wrapper to run waf
WAF=WAF_MAKE=1 PATH=buildtools/bin:../../buildtools/bin:$$PATH waf
all:
$(WAF) build
install:
$(WAF) install
uninstall:
$(WAF) uninstall
test: FORCE
$(WAF) test $(TEST_OPTIONS)
testenv:
$(WAF) test --testenv $(TEST_OPTIONS)
quicktest:
$(WAF) test --quick $(TEST_OPTIONS)
dist:
touch .tmplock
WAFLOCK=.tmplock $(WAF) dist
distcheck:
touch .tmplock
WAFLOCK=.tmplock $(WAF) distcheck
clean:
$(WAF) clean
distclean:
$(WAF) distclean
reconfigure: configure
$(WAF) reconfigure
show_waf_options:
$(WAF) --help
# some compatibility make targets
everything: all
testsuite: all
.PHONY: check
check: test
torture: all
# this should do an install as well, once install is finished
installcheck: test
etags:
$(WAF) etags
ctags:
$(WAF) ctags
pydoctor:
$(WAF) pydoctor
bin/%:: FORCE
$(WAF) --targets=`basename $@`
FORCE:
ntdb-1.0/buildtools/ 0000775 0000000 0000000 00000000000 12241515307 0014461 5 ustar 00root root 0000000 0000000 ntdb-1.0/buildtools/README 0000664 0000000 0000000 00000000562 12241515307 0015344 0 ustar 00root root 0000000 0000000 See http://code.google.com/p/waf/ for more information on waf
You can get a svn copy of the upstream source with:
svn checkout http://waf.googlecode.com/svn/trunk/ waf-read-only
Samba currently uses waf 1.5, which can be found at:
http://waf.googlecode.com/svn/branches/waf-1.5
To update the current copy of waf, use the update-waf.sh script in this
directory.
ntdb-1.0/buildtools/bin/ 0000775 0000000 0000000 00000000000 12241515307 0015231 5 ustar 00root root 0000000 0000000 ntdb-1.0/buildtools/bin/waf 0000775 0000000 0000000 00000004321 12241515307 0015734 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: ISO-8859-1
# Thomas Nagy, 2005-2010
"""
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os, sys
if sys.hexversion<0x203000f: raise ImportError("Waf requires Python >= 2.3")
if 'PSYCOWAF' in os.environ:
try:import psyco;psyco.full()
except:pass
VERSION="1.5.19"
REVISION="x"
INSTALL="x"
C1='x'
C2='x'
cwd = os.getcwd()
join = os.path.join
WAF='waf'
def b(x):
return x
if sys.hexversion>0x300000f:
WAF='waf3'
def b(x):
return x.encode()
def err(m):
print(('\033[91mError: %s\033[0m' % m))
sys.exit(1)
def test(dir):
try: os.stat(join(dir, 'wafadmin')); return os.path.abspath(dir)
except OSError: pass
def find_lib():
return os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
wafdir = find_lib()
w = join(wafdir, 'wafadmin')
t = join(w, 'Tools')
f = join(w, '3rdparty')
sys.path = [w, t, f] + sys.path
if __name__ == '__main__':
import Scripting
Scripting.prepare(t, cwd, VERSION, wafdir)
ntdb-1.0/buildtools/compare_config_h4.sh 0000775 0000000 0000000 00000000502 12241515307 0020363 0 ustar 00root root 0000000 0000000 #!/bin/sh
# compare the generated config.h from a waf build with existing samba
# build
grep "^.define" bin/default/source4/include/config.h | sort > waf-config.h
grep "^.define" $HOME/samba_old/source4/include/config.h | sort > old-config.h
comm -23 old-config.h waf-config.h
#echo
#diff -u old-config.h waf-config.h
ntdb-1.0/buildtools/compare_generated.sh 0000775 0000000 0000000 00000002512 12241515307 0020464 0 ustar 00root root 0000000 0000000 #!/bin/sh
# compare the generated files from a waf
old_build=$HOME/samba_old
gen_files=$(cd bin/default && find . -type f -name '*.[ch]')
2>&1
strip_file()
{
in_file=$1
out_file=$2
cat $in_file |
grep -v 'The following definitions come from' |
grep -v 'Automatically generated at' |
grep -v 'Generated from' |
sed 's|/home/tnagy/samba/source4||g' |
sed 's|/home/tnagy/samba/|../|g' |
sed 's|bin/default/source4/||g' |
sed 's|bin/default/|../|g' |
sed 's/define _____/define ___/g' |
sed 's/define __*/define _/g' |
sed 's/define _DEFAULT_/define _/g' |
sed 's/define _SOURCE4_/define ___/g' |
sed 's/define ___/define _/g' |
sed 's/ifndef ___/ifndef _/g' |
sed 's|endif /* ____|endif /* __|g' |
sed s/__DEFAULT_SOURCE4/__/ |
sed s/__DEFAULT_SOURCE4/__/ |
sed s/__DEFAULT/____/ > $out_file
}
compare_file()
{
f=$f
bname=$(basename $f)
t1=/tmp/$bname.old.$$
t2=/tmp/$bname.new.$$
strip_file $old_build/$f $t1
strip_file bin/default/$f $t2
diff -u -b $t1 $t2 2>&1
rm -f $t1 $t2
}
for f in $gen_files; do
compare_file $f
done
ntdb-1.0/buildtools/compare_install.sh 0000775 0000000 0000000 00000000212 12241515307 0020167 0 ustar 00root root 0000000 0000000 #!/bin/sh
prefix1="$1"
prefix2="$2"
(cd $prefix1 && find . ) | sort > p1.txt
(cd $prefix2 && find . ) | sort > p2.txt
diff -u p[12].txt
ntdb-1.0/buildtools/scripts/ 0000775 0000000 0000000 00000000000 12241515307 0016150 5 ustar 00root root 0000000 0000000 ntdb-1.0/buildtools/scripts/Makefile.waf 0000664 0000000 0000000 00000001767 12241515307 0020377 0 ustar 00root root 0000000 0000000 # simple makefile wrapper to run waf
WAF_BINARY=BUILDTOOLS/bin/waf
WAF=WAF_MAKE=1 $(WAF_BINARY)
all:
$(WAF) build
install:
$(WAF) install
uninstall:
$(WAF) uninstall
test:
$(WAF) test $(TEST_OPTIONS)
help:
@echo NOTE: to run extended waf options use $(WAF_BINARY) or modify your PATH
$(WAF) --help
testenv:
$(WAF) test --testenv $(TEST_OPTIONS)
quicktest:
$(WAF) test --quick $(TEST_OPTIONS)
dist:
$(WAF) dist
distcheck:
$(WAF) distcheck
clean:
$(WAF) clean
distclean:
$(WAF) distclean
reconfigure: configure
$(WAF) reconfigure
show_waf_options:
$(WAF) --help
# some compatibility make targets
everything: all
testsuite: all
check: test
torture: all
# this should do an install as well, once install is finished
installcheck: test
etags:
$(WAF) etags
ctags:
$(WAF) ctags
bin/%:: FORCE
$(WAF) --targets=$@
FORCE:
configure: autogen-waf.sh BUILDTOOLS/scripts/configure.waf
./autogen-waf.sh
Makefile: autogen-waf.sh configure BUILDTOOLS/scripts/Makefile.waf
./autogen-waf.sh
ntdb-1.0/buildtools/scripts/abi_gen.sh 0000775 0000000 0000000 00000000753 12241515307 0020100 0 ustar 00root root 0000000 0000000 #!/bin/sh
# generate a set of ABI signatures from a shared library
SHAREDLIB="$1"
GDBSCRIPT="gdb_syms.$$"
(
cat < $GDBSCRIPT
# forcing the terminal avoids a problem on Fedora12
TERM=none gdb -batch -x $GDBSCRIPT "$SHAREDLIB" < /dev/null
rm -f $GDBSCRIPT
ntdb-1.0/buildtools/scripts/autogen-waf.sh 0000775 0000000 0000000 00000001352 12241515307 0020725 0 ustar 00root root 0000000 0000000 #!/bin/sh
p=`dirname $0`
echo "Setting up for waf build"
echo "Looking for the buildtools directory"
d="buildtools"
while test \! -d "$p/$d"; do d="../$d"; done
echo "Found buildtools in $p/$d"
echo "Setting up configure"
rm -f $p/configure $p/include/config*.h*
sed "s|BUILDTOOLS|$d|g;s|BUILDPATH|$p|g" < "$p/$d/scripts/configure.waf" > $p/configure
chmod +x $p/configure
echo "Setting up Makefile"
rm -f $p/makefile $p/Makefile
sed "s|BUILDTOOLS|$d|g" < "$p/$d/scripts/Makefile.waf" > $p/Makefile
echo "done. Now run $p/configure or $p/configure.developer then make."
if [ $p != "." ]; then
echo "Notice: The build invoke path is not 'source4'! Use make with the parameter"
echo "-C <'source4' path>. Example: make -C source4 all"
fi
ntdb-1.0/buildtools/scripts/configure.waf 0000775 0000000 0000000 00000000371 12241515307 0020634 0 ustar 00root root 0000000 0000000 #!/bin/sh
PREVPATH=`dirname $0`
WAF=BUILDTOOLS/bin/waf
# using JOBS=1 gives maximum compatibility with
# systems like AIX which have broken threading in python
JOBS=1
export JOBS
cd BUILDPATH || exit 1
$WAF configure "$@" || exit 1
cd $PREVPATH
ntdb-1.0/buildtools/testwaf.sh 0000775 0000000 0000000 00000002600 12241515307 0016473 0 ustar 00root root 0000000 0000000 #!/bin/bash
set -e
set -x
d=$(dirname $0)
cd $d/..
PREFIX=$HOME/testprefix
if [ $# -gt 0 ]; then
tests="$*"
else
tests="lib/replace lib/talloc lib/tevent lib/tdb lib/ldb"
fi
echo "testing in dirs $tests"
for d in $tests; do
echo "`date`: testing $d"
pushd $d
rm -rf bin
type waf
waf dist
./configure -C --enable-developer --prefix=$PREFIX
time make
make install
make distcheck
case $d in
"lib/ldb")
ldd bin/ldbadd
;;
"lib/replace")
ldd bin/replace_testsuite
;;
"lib/talloc")
ldd bin/talloc_testsuite
;;
"lib/tdb")
ldd bin/tdbtool
;;
esac
popd
done
echo "testing python portability"
pushd lib/talloc
versions="python2.4 python2.5 python2.6 python3.0 python3.1"
for p in $versions; do
ret=$(which $p || echo "failed")
if [ $ret = "failed" ]; then
echo "$p not found, skipping"
continue
fi
echo "Testing $p"
$p ../../buildtools/bin/waf configure -C --enable-developer --prefix=$PREFIX
$p ../../buildtools/bin/waf build install
done
popd
echo "testing cross compiling"
pushd lib/talloc
ret=$(which arm-linux-gnueabi-gcc || echo "failed")
if [ $ret != "failed" ]; then
CC=arm-linux-gnueabi-gcc ./configure -C --prefix=$PREFIX --cross-compile --cross-execute='runarm'
make && make install
else
echo "Cross-compiler not installed, skipping test"
fi
popd
ntdb-1.0/buildtools/update-waf.sh 0000775 0000000 0000000 00000000417 12241515307 0017057 0 ustar 00root root 0000000 0000000 #!/bin/sh
# Update our copy of waf
TARGETDIR="`dirname $0`"
WORKDIR="`mktemp -d -t update-waf-XXXXXX`"
mkdir -p "$WORKDIR"
git clone https://code.google.com/p/waf.waf15/ "$WORKDIR"
rsync -C -avz --delete "$WORKDIR/wafadmin/" "$TARGETDIR/wafadmin/"
rm -rf "$WORKDIR"
ntdb-1.0/buildtools/wafadmin/ 0000775 0000000 0000000 00000000000 12241515307 0016247 5 ustar 00root root 0000000 0000000 ntdb-1.0/buildtools/wafadmin/3rdparty/ 0000775 0000000 0000000 00000000000 12241515307 0020017 5 ustar 00root root 0000000 0000000 ntdb-1.0/buildtools/wafadmin/3rdparty/ParallelDebug.py 0000664 0000000 0000000 00000020532 12241515307 0023076 0 ustar 00root root 0000000 0000000 #! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2007-2010 (ita)
"""
debugging helpers for parallel compilation, outputs
a svg file in the build directory
"""
import os, time, sys, threading
try: from Queue import Queue
except: from queue import Queue
import Runner, Options, Utils, Task, Logs
from Constants import *
#import random
#random.seed(100)
def set_options(opt):
opt.add_option('--dtitle', action='store', default='Parallel build representation for %r' % ' '.join(sys.argv),
help='title for the svg diagram', dest='dtitle')
opt.add_option('--dwidth', action='store', type='int', help='diagram width', default=1000, dest='dwidth')
opt.add_option('--dtime', action='store', type='float', help='recording interval in seconds', default=0.009, dest='dtime')
opt.add_option('--dband', action='store', type='int', help='band width', default=22, dest='dband')
opt.add_option('--dmaxtime', action='store', type='float', help='maximum time, for drawing fair comparisons', default=0, dest='dmaxtime')
# red #ff4d4d
# green #4da74d
# lila #a751ff
color2code = {
'GREEN' : '#4da74d',
'YELLOW' : '#fefe44',
'PINK' : '#a751ff',
'RED' : '#cc1d1d',
'BLUE' : '#6687bb',
'CYAN' : '#34e2e2',
}
mp = {}
info = [] # list of (text,color)
def map_to_color(name):
if name in mp:
return mp[name]
try:
cls = Task.TaskBase.classes[name]
except KeyError:
return color2code['RED']
if cls.color in mp:
return mp[cls.color]
if cls.color in color2code:
return color2code[cls.color]
return color2code['RED']
def loop(self):
while 1:
tsk=Runner.TaskConsumer.ready.get()
tsk.master.set_running(1, id(threading.currentThread()), tsk)
Runner.process_task(tsk)
tsk.master.set_running(-1, id(threading.currentThread()), tsk)
Runner.TaskConsumer.loop = loop
old_start = Runner.Parallel.start
def do_start(self):
print Options.options
try:
Options.options.dband
except AttributeError:
raise ValueError('use def options(opt): opt.load("parallel_debug")!')
self.taskinfo = Queue()
old_start(self)
process_colors(self)
Runner.Parallel.start = do_start
def set_running(self, by, i, tsk):
self.taskinfo.put( (i, id(tsk), time.time(), tsk.__class__.__name__, self.processed, self.count, by) )
Runner.Parallel.set_running = set_running
def name2class(name):
return name.replace(' ', '_').replace('.', '_')
def process_colors(producer):
# first, cast the parameters
tmp = []
try:
while True:
tup = producer.taskinfo.get(False)
tmp.append(list(tup))
except:
pass
try:
ini = float(tmp[0][2])
except:
return
if not info:
seen = []
for x in tmp:
name = x[3]
if not name in seen:
seen.append(name)
else:
continue
info.append((name, map_to_color(name)))
info.sort(key=lambda x: x[0])
thread_count = 0
acc = []
for x in tmp:
thread_count += x[6]
acc.append("%d %d %f %r %d %d %d" % (x[0], x[1], x[2] - ini, x[3], x[4], x[5], thread_count))
f = open('pdebug.dat', 'w')
#Utils.write('\n'.join(acc))
f.write('\n'.join(acc))
tmp = [lst[:2] + [float(lst[2]) - ini] + lst[3:] for lst in tmp]
st = {}
for l in tmp:
if not l[0] in st:
st[l[0]] = len(st.keys())
tmp = [ [st[lst[0]]] + lst[1:] for lst in tmp ]
THREAD_AMOUNT = len(st.keys())
st = {}
for l in tmp:
if not l[1] in st:
st[l[1]] = len(st.keys())
tmp = [ [lst[0]] + [st[lst[1]]] + lst[2:] for lst in tmp ]
BAND = Options.options.dband
seen = {}
acc = []
for x in range(len(tmp)):
line = tmp[x]
id = line[1]
if id in seen:
continue
seen[id] = True
begin = line[2]
thread_id = line[0]
for y in range(x + 1, len(tmp)):
line = tmp[y]
if line[1] == id:
end = line[2]
#print id, thread_id, begin, end
#acc.append( ( 10*thread_id, 10*(thread_id+1), 10*begin, 10*end ) )
acc.append( (BAND * begin, BAND*thread_id, BAND*end - BAND*begin, BAND, line[3]) )
break
if Options.options.dmaxtime < 0.1:
gwidth = 1
for x in tmp:
m = BAND * x[2]
if m > gwidth:
gwidth = m
else:
gwidth = BAND * Options.options.dmaxtime
ratio = float(Options.options.dwidth) / gwidth
gwidth = Options.options.dwidth
gheight = BAND * (THREAD_AMOUNT + len(info) + 1.5)
out = []
out.append("""
")
#node = producer.bld.path.make_node('pdebug.svg')
f = open('pdebug.svg', 'w')
f.write("".join(out))
ntdb-1.0/buildtools/wafadmin/3rdparty/batched_cc.py 0000664 0000000 0000000 00000011056 12241515307 0022433 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
"""
Batched builds - compile faster
instead of compiling object files one by one, c/c++ compilers are often able to compile at once:
cc -c ../file1.c ../file2.c ../file3.c
Files are output on the directory where the compiler is called, and dependencies are more difficult
to track (do not run the command on all source files if only one file changes)
As such, we do as if the files were compiled one by one, but no command is actually run:
replace each cc/cpp Task by a TaskSlave
A new task called TaskMaster collects the signatures from each slave and finds out the command-line
to run.
To set this up, the method ccroot::create_task is replaced by a new version, to enable batched builds
it is only necessary to import this module in the configuration (no other change required)
"""
MAX_BATCH = 50
MAXPARALLEL = False
EXT_C = ['.c', '.cc', '.cpp', '.cxx']
import os, threading
import TaskGen, Task, ccroot, Build, Logs
from TaskGen import extension, feature, before
from Constants import *
cc_str = '${CC} ${CCFLAGS} ${CPPFLAGS} ${_CCINCFLAGS} ${_CCDEFFLAGS} -c ${SRCLST}'
cc_fun = Task.compile_fun_noshell('batched_cc', cc_str)[0]
cxx_str = '${CXX} ${CXXFLAGS} ${CPPFLAGS} ${_CXXINCFLAGS} ${_CXXDEFFLAGS} -c ${SRCLST}'
cxx_fun = Task.compile_fun_noshell('batched_cxx', cxx_str)[0]
count = 70000
class batch_task(Task.Task):
color = 'RED'
after = 'cc cxx'
before = 'cc_link cxx_link static_link'
def __str__(self):
return '(batch compilation for %d slaves)\n' % len(self.slaves)
def __init__(self, *k, **kw):
Task.Task.__init__(self, *k, **kw)
self.slaves = []
self.inputs = []
self.hasrun = 0
global count
count += 1
self.idx = count
def add_slave(self, slave):
self.slaves.append(slave)
self.set_run_after(slave)
def runnable_status(self):
for t in self.run_after:
if not t.hasrun:
return ASK_LATER
for t in self.slaves:
#if t.executed:
if t.hasrun != SKIPPED:
return RUN_ME
return SKIP_ME
def run(self):
outputs = []
self.outputs = []
srclst = []
slaves = []
for t in self.slaves:
if t.hasrun != SKIPPED:
slaves.append(t)
srclst.append(t.inputs[0].abspath(self.env))
self.env.SRCLST = srclst
self.cwd = slaves[0].inputs[0].parent.abspath(self.env)
env = self.env
app = env.append_unique
cpppath_st = env['CPPPATH_ST']
env._CCINCFLAGS = env.CXXINCFLAGS = []
# local flags come first
# set the user-defined includes paths
for i in env['INC_PATHS']:
app('_CCINCFLAGS', cpppath_st % i.abspath())
app('_CXXINCFLAGS', cpppath_st % i.abspath())
app('_CCINCFLAGS', cpppath_st % i.abspath(env))
app('_CXXINCFLAGS', cpppath_st % i.abspath(env))
# set the library include paths
for i in env['CPPPATH']:
app('_CCINCFLAGS', cpppath_st % i)
app('_CXXINCFLAGS', cpppath_st % i)
if self.slaves[0].__class__.__name__ == 'cc':
ret = cc_fun(self)
else:
ret = cxx_fun(self)
if ret:
return ret
for t in slaves:
t.old_post_run()
from TaskGen import extension, feature, after
import cc, cxx
def wrap(fun):
def foo(self, node):
# we cannot control the extension, this sucks
self.obj_ext = '.o'
task = fun(self, node)
if not getattr(self, 'masters', None):
self.masters = {}
self.allmasters = []
if not node.parent.id in self.masters:
m = self.masters[node.parent.id] = self.master = self.create_task('batch')
self.allmasters.append(m)
else:
m = self.masters[node.parent.id]
if len(m.slaves) > MAX_BATCH:
m = self.masters[node.parent.id] = self.master = self.create_task('batch')
self.allmasters.append(m)
m.add_slave(task)
return task
return foo
c_hook = wrap(cc.c_hook)
extension(cc.EXT_CC)(c_hook)
cxx_hook = wrap(cxx.cxx_hook)
extension(cxx.EXT_CXX)(cxx_hook)
@feature('cprogram', 'cshlib', 'cstaticlib')
@after('apply_link')
def link_after_masters(self):
if getattr(self, 'allmasters', None):
for m in self.allmasters:
self.link_task.set_run_after(m)
for c in ['cc', 'cxx']:
t = Task.TaskBase.classes[c]
def run(self):
pass
def post_run(self):
#self.executed=1
pass
def can_retrieve_cache(self):
if self.old_can_retrieve_cache():
for m in self.generator.allmasters:
try:
m.slaves.remove(self)
except ValueError:
pass #this task wasn't included in that master
return 1
else:
return None
setattr(t, 'oldrun', t.__dict__['run'])
setattr(t, 'run', run)
setattr(t, 'old_post_run', t.post_run)
setattr(t, 'post_run', post_run)
setattr(t, 'old_can_retrieve_cache', t.can_retrieve_cache)
setattr(t, 'can_retrieve_cache', can_retrieve_cache)
ntdb-1.0/buildtools/wafadmin/3rdparty/boost.py 0000664 0000000 0000000 00000025242 12241515307 0021524 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
#
# partially based on boost.py written by Gernot Vormayr
# written by Ruediger Sonderfeld , 2008
# modified by Bjoern Michaelsen, 2008
# modified by Luca Fossati, 2008
# rewritten for waf 1.5.1, Thomas Nagy, 2008
#
#def set_options(opt):
# opt.tool_options('boost')
# # ...
#
#def configure(conf):
# # ... (e.g. conf.check_tool('g++'))
# conf.check_tool('boost')
# conf.check_boost(lib='signals filesystem', static='onlystatic', score_version=(-1000, 1000), tag_minscore=1000)
#
#def build(bld):
# bld(source='main.c', target='bar', uselib="BOOST BOOST_SYSTEM")
#
#ISSUES:
# * find_includes should be called only once!
# * support mandatory
######## boost update ###########
## ITA: * the method get_boost_version_number does work
## * the rest of the code has not really been tried
# * make certain a demo is provided (in demos/adv for example)
# TODO: bad and underdocumented code -> boost.py will be removed in waf 1.6 to be rewritten later
import os.path, glob, types, re, sys
import Configure, config_c, Options, Utils, Logs
from Logs import warn, debug
from Configure import conf
boost_code = '''
#include
#include
int main() { std::cout << BOOST_VERSION << std::endl; }
'''
boost_libpath = ['/usr/lib', '/usr/local/lib', '/opt/local/lib', '/sw/lib', '/lib']
boost_cpppath = ['/usr/include', '/usr/local/include', '/opt/local/include', '/sw/include']
STATIC_NOSTATIC = 'nostatic'
STATIC_BOTH = 'both'
STATIC_ONLYSTATIC = 'onlystatic'
is_versiontag = re.compile('^\d+_\d+_?\d*$')
is_threadingtag = re.compile('^mt$')
is_abitag = re.compile('^[sgydpn]+$')
is_toolsettag = re.compile('^(acc|borland|como|cw|dmc|darwin|gcc|hp_cxx|intel|kylix|vc|mgw|qcc|sun|vacpp)\d*$')
is_pythontag=re.compile('^py[0-9]{2}$')
def set_options(opt):
opt.add_option('--boost-includes', type='string', default='', dest='boostincludes', help='path to the boost directory where the includes are e.g. /usr/local/include/boost-1_35')
opt.add_option('--boost-libs', type='string', default='', dest='boostlibs', help='path to the directory where the boost libs are e.g. /usr/local/lib')
def string_to_version(s):
 """Convert a dotted version string like '1.35.0' into one integer
 (major*100000 + minor*100 + patch); return 0 for strings with fewer
 than three components."""
 parts = s.split('.')
 if len(parts) < 3:
  return 0
 major, minor, patch = int(parts[0]), int(parts[1]), int(parts[2])
 return major * 100000 + minor * 100 + patch
def version_string(version):
 """Convert an integer version (see string_to_version) back into the
 underscore form used in boost paths, e.g. 103500 -> '1_35',
 104601 -> '1_46_1'. The trailing component is omitted when zero.

 BUG FIX: use floor division (//) instead of '/': the result is
 identical under Python 2 but '/' becomes float division under
 Python 3, which would corrupt the computed components."""
 major = version // 100000
 minor = version // 100 % 1000
 minor_minor = version % 100
 if minor_minor == 0:
  return "%d_%d" % (major, minor)
 else:
  return "%d_%d_%d" % (major, minor, minor_minor)
def libfiles(lib, pattern, lib_paths):
 """Glob every path in *lib_paths* for boost library files matching
 *lib* (e.g. 'thread'), using *pattern* (the platform's shlib/staticlib
 filename pattern) to build the glob expression."""
 globexpr = pattern % ('boost_%s[!_]*' % lib)
 found = []
 for directory in lib_paths:
  found.extend(glob.glob(os.path.join(directory, globexpr)))
 return found
@conf
def get_boost_version_number(self, dir):
 """silently retrieve the boost version number"""
 # Compiles and runs the tiny boost_code program with *dir* on the
 # include path; the run result carries BOOST_VERSION. Returns -1 when
 # the check fails (e.g. headers not found) so callers can skip *dir*.
 try:
  return self.run_c_code(compiler='cxx', code=boost_code, includes=dir, execute=1, env=self.env.copy(), type='cprogram', compile_mode='cxx', compile_filename='test.cpp')
 except Configure.ConfigurationError, e:
  return -1
def set_default(kw, var, val):
 """Store kw[var] = val only when the key is not already present."""
 if var not in kw:
  kw[var] = val
def tags_score(tags, kw):
 """
 checks library tags
 see http://www.boost.org/doc/libs/1_35_0/more/getting_started/unix-variants.html 6.1
 """
 # Scores how well a library filename's tags match the caller's request:
 # a matching tag adds kw['score_<tag>'][0], a found-but-mismatching tag
 # adds kw['score_<tag>'][1] (typically negative).
 score = 0
 needed_tags = {
  'threading': kw['tag_threading'],
  'abi': kw['tag_abi'],
  'toolset': kw['tag_toolset'],
  'version': kw['tag_version'],
  'python': kw['tag_python']
 }
 if kw['tag_toolset'] is None:
  # No explicit toolset requested: derive one from the configured
  # compiler name plus its major(+minor) version, e.g. 'gcc' -> 'gcc43'.
  v = kw['env']
  toolset = v['CXX_NAME']
  if v['CXX_VERSION']:
   version_no = v['CXX_VERSION'].split('.')
   toolset += version_no[0]
   if len(version_no) > 1:
    toolset += version_no[1]
  needed_tags['toolset'] = toolset
 # Classify each filename tag using the module-level regexps.
 found_tags = {}
 for tag in tags:
  if is_versiontag.match(tag): found_tags['version'] = tag
  if is_threadingtag.match(tag): found_tags['threading'] = tag
  if is_abitag.match(tag): found_tags['abi'] = tag
  if is_toolsettag.match(tag): found_tags['toolset'] = tag
  if is_pythontag.match(tag): found_tags['python'] = tag
 # NOTE: iterkeys() is Python 2 only.
 for tagname in needed_tags.iterkeys():
  if needed_tags[tagname] is not None and tagname in found_tags:
   # The requested tag value is itself treated as a regexp.
   if re.compile(needed_tags[tagname]).match(found_tags[tagname]):
    score += kw['score_' + tagname][0]
   else:
    score += kw['score_' + tagname][1]
 return score
@conf
def validate_boost(self, kw):
 # Fill in every keyword argument check_boost() understands with its
 # default value, so the rest of the tool can rely on the keys existing.
 ver = kw.get('version', '')
 # 'version' acts as the default for both min_version and max_version.
 for x in 'min_version max_version version'.split():
  set_default(kw, x, ver)
 set_default(kw, 'lib', '')
 kw['lib'] = Utils.to_list(kw['lib'])
 set_default(kw, 'env', self.env)
 set_default(kw, 'libpath', boost_libpath)
 set_default(kw, 'cpppath', boost_cpppath)
 for x in 'tag_threading tag_version tag_toolset'.split():
  set_default(kw, x, None)
 # Default ABI tag rejects debug ('d') builds.
 set_default(kw, 'tag_abi', '^[^d]*$')
 # Default python tag matches the running interpreter, e.g. 'py27'.
 set_default(kw, 'python', str(sys.version_info[0]) + str(sys.version_info[1]) )
 set_default(kw, 'tag_python', '^py' + kw['python'] + '$')
 # (match, mismatch) score pairs used by tags_score().
 set_default(kw, 'score_threading', (10, -10))
 set_default(kw, 'score_abi', (10, -10))
 set_default(kw, 'score_python', (10,-10))
 set_default(kw, 'score_toolset', (1, -1))
 set_default(kw, 'score_version', (100, -100))
 set_default(kw, 'score_min', 0)
 set_default(kw, 'static', STATIC_NOSTATIC)
 set_default(kw, 'found_includes', False)
 set_default(kw, 'min_score', 0)
 set_default(kw, 'errmsg', 'not found')
 set_default(kw, 'okmsg', 'ok')
@conf
def find_boost_includes(self, kw):
 """
 check every path in kw['cpppath'] for subdir
 that either starts with boost- or is named boost.
 Then the version is checked and selected accordingly to
 min_version/max_version. The highest possible version number is
 selected!
 If no versiontag is set the versiontag is set accordingly to the
 selected library and CPPPATH_BOOST is set.
 """
 # --boost-includes on the command line overrides the search paths.
 boostPath = getattr(Options.options, 'boostincludes', '')
 if boostPath:
  boostPath = [os.path.normpath(os.path.expandvars(os.path.expanduser(boostPath)))]
 else:
  boostPath = Utils.to_list(kw['cpppath'])
 min_version = string_to_version(kw.get('min_version', ''))
 # NOTE: sys.maxint is Python 2 only.
 max_version = string_to_version(kw.get('max_version', '')) or (sys.maxint - 1)
 version = 0
 for include_path in boostPath:
  boost_paths = [p for p in glob.glob(os.path.join(include_path, 'boost*')) if os.path.isdir(p)]
  debug('BOOST Paths: %r' % boost_paths)
  for path in boost_paths:
   pathname = os.path.split(path)[-1]
   ret = -1
   if pathname == 'boost':
    # plain 'boost' subdir: headers are included as <boost/...>,
    # so the parent directory is the include path
    path = include_path
    ret = self.get_boost_version_number(path)
   elif pathname.startswith('boost-'):
    ret = self.get_boost_version_number(path)
   ret = int(ret)
   # keep the highest version inside [min_version, max_version]
   if ret != -1 and ret >= min_version and ret <= max_version and ret > version:
    boost_path = path
    version = ret
 if not version:
  self.fatal('boost headers not found! (required version min: %s max: %s)'
        % (kw['min_version'], kw['max_version']))
  return False
 found_version = version_string(version)
 versiontag = '^' + found_version + '$'
 if kw['tag_version'] is None:
  kw['tag_version'] = versiontag
 elif kw['tag_version'] != versiontag:
  warn('boost header version %r and tag_version %r do not match!' % (versiontag, kw['tag_version']))
 env = self.env
 env['CPPPATH_BOOST'] = boost_path
 env['BOOST_VERSION'] = found_version
 self.found_includes = 1
 ret = 'Version %s (%s)' % (found_version, boost_path)
 return ret
@conf
def find_boost_library(self, lib, kw):
 # Locate the best-scoring boost library file for *lib* (e.g. 'thread')
 # and record LIBPATH_BOOST_<LIB> and (STATIC)LIB_BOOST_<LIB> in the env.
 # Raises via self.fatal() when nothing acceptable is found.
 def find_library_from_list(lib, files):
  # Pick the candidate whose filename tags score highest (tags_score);
  # candidates below kw['min_score'] are rejected.
  lib_pattern = re.compile('.*boost_(.*?)\..*')
  result = (None, None)
  resultscore = kw['min_score'] - 1
  for file in files:
   m = lib_pattern.search(file, 1)
   if m:
    libname = m.group(1)
    libtags = libname.split('-')[1:]
    currentscore = tags_score(libtags, kw)
    if currentscore > resultscore:
     result = (libname, file)
     resultscore = currentscore
  return result
 # --boost-libs on the command line overrides the search paths.
 lib_paths = getattr(Options.options, 'boostlibs', '')
 if lib_paths:
  lib_paths = [os.path.normpath(os.path.expandvars(os.path.expanduser(lib_paths)))]
 else:
  lib_paths = Utils.to_list(kw['libpath'])
 v = kw.get('env', self.env)
 (libname, file) = (None, None)
 # Try shared libraries first unless 'onlystatic' was requested.
 if kw['static'] in [STATIC_NOSTATIC, STATIC_BOTH]:
  st_env_prefix = 'LIB'
  files = libfiles(lib, v['shlib_PATTERN'], lib_paths)
  (libname, file) = find_library_from_list(lib, files)
 # Fall back to (or directly use) static libraries.
 if libname is None and kw['static'] in [STATIC_ONLYSTATIC, STATIC_BOTH]:
  st_env_prefix = 'STATICLIB'
  staticLibPattern = v['staticlib_PATTERN']
  if self.env['CC_NAME'] == 'msvc':
   staticLibPattern = 'lib' + staticLibPattern
  files = libfiles(lib, staticLibPattern, lib_paths)
  (libname, file) = find_library_from_list(lib, files)
 if libname is not None:
  v['LIBPATH_BOOST_' + lib.upper()] = [os.path.split(file)[0]]
  if self.env['CC_NAME'] == 'msvc' and os.path.splitext(file)[1] == '.lib':
   v[st_env_prefix + '_BOOST_' + lib.upper()] = ['libboost_'+libname]
  else:
   v[st_env_prefix + '_BOOST_' + lib.upper()] = ['boost_'+libname]
  return
 self.fatal('lib boost_' + lib + ' not found!')
@conf
def check_boost(self, *k, **kw):
 """
 This should be the main entry point
 - min_version
 - max_version
 - version
 - include_path
 - lib_path
 - lib
 - toolsettag - None or a regexp
 - threadingtag - None or a regexp
 - abitag - None or a regexp
 - versiontag - WARNING: you should rather use version or min_version/max_version
 - static - look for static libs (values:
 'nostatic' or STATIC_NOSTATIC - ignore static libs (default)
 'both' or STATIC_BOTH - find static libs, too
 'onlystatic' or STATIC_ONLYSTATIC - find only static libs
 - score_version
 - score_abi
 - score_threading
 - score_toolset
 * the scores are tuples (match_score, nomatch_score)
 match_score is added to the score if the tag is matched
 nomatch_score is added when a tag is found and does not match
 - min_score
 """
 if not self.env['CXX']:
  self.fatal('load a c++ compiler tool first, for example conf.check_tool("g++")')
 # Fill in defaults for all keyword arguments.
 self.validate_boost(kw)
 ret = None
 # First locate the headers (unless the caller already did).
 try:
  if not kw.get('found_includes', None):
   self.check_message_1(kw.get('msg_includes', 'boost headers'))
   ret = self.find_boost_includes(kw)
 except Configure.ConfigurationError, e:
  if 'errmsg' in kw:
   self.check_message_2(kw['errmsg'], 'YELLOW')
  if 'mandatory' in kw:
   if Logs.verbose > 1:
    raise
   else:
    self.fatal('the configuration failed (see %r)' % self.log.name)
 else:
  if 'okmsg' in kw:
   self.check_message_2(kw.get('okmsg_includes', ret))
 # Then locate each requested library.
 for lib in kw['lib']:
  self.check_message_1('library boost_'+lib)
  try:
   self.find_boost_library(lib, kw)
  except Configure.ConfigurationError, e:
   ret = False
   if 'errmsg' in kw:
    self.check_message_2(kw['errmsg'], 'YELLOW')
   if 'mandatory' in kw:
    if Logs.verbose > 1:
     raise
    else:
     self.fatal('the configuration failed (see %r)' % self.log.name)
  else:
   if 'okmsg' in kw:
    self.check_message_2(kw['okmsg'])
 return ret
ntdb-1.0/buildtools/wafadmin/3rdparty/fluid.py 0000664 0000000 0000000 00000001543 12241515307 0021477 0 ustar 00root root 0000000 0000000 #!/usr/bin/python
# encoding: utf-8
# Grygoriy Fuchedzhy 2009
"""
Compile fluid files (fltk graphic library). Use the 'fluid' feature in conjunction with the 'cxx' feature.
"""
import Task
from TaskGen import extension
# Task invoking the fltk 'fluid' tool: one .fl input, a .cxx and a header out.
Task.simple_task_type('fluid', '${FLUID} -c -o ${TGT[0].abspath(env)} -h ${TGT[1].abspath(env)} ${SRC}', 'BLUE', shell=False, ext_out='.cxx')
@extension('.fl')
def fluid(self, node):
 """add the .fl to the source list; the cxx file generated will be compiled when possible"""
 cpp = node.change_ext('.cpp')
 hpp = node.change_ext('.hpp')
 self.create_task('fluid', node, [cpp, hpp])
 # Only feed the generated .cpp to the build when the generator compiles C++.
 if 'cxx' in self.features:
  self.allnodes.append(cpp)
def detect(conf):
 # Both the fluid program and fltk-config are required to use this tool.
 fluid = conf.find_program('fluid', var='FLUID', mandatory=True)
 conf.check_cfg(path='fltk-config', package='', args='--cxxflags --ldflags', uselib_store='FLTK', mandatory=True)
ntdb-1.0/buildtools/wafadmin/3rdparty/gccdeps.py 0000664 0000000 0000000 00000005423 12241515307 0022005 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2008-2010 (ita)
"""
Execute the tasks with gcc -MD, read the dependencies from the .d file
and prepare the dependency calculation for the next run
"""
import os, re, threading
import Task, Logs, Utils, preproc
from TaskGen import before, after, feature
# Protects Node tree lookups in post_run(), which runs from worker threads.
lock = threading.Lock()
# gcc flag that makes the compiler emit a .d dependency file per object.
preprocessor_flag = '-MD'
@feature('cc')
@before('apply_core')
def add_mmd_cc(self):
 # Add -MD to CCFLAGS once (skip if the user already set it).
 if self.env.get_flat('CCFLAGS').find(preprocessor_flag) < 0:
  self.env.append_value('CCFLAGS', preprocessor_flag)
@feature('cxx')
@before('apply_core')
def add_mmd_cxx(self):
 # Same as add_mmd_cc, for C++ flags.
 if self.env.get_flat('CXXFLAGS').find(preprocessor_flag) < 0:
  self.env.append_value('CXXFLAGS', preprocessor_flag)
def scan(self):
 "the scanner does not do anything initially"
 # Dependencies come from the .d files parsed in post_run() on the
 # previous build; the first build has none.
 nodes = self.generator.bld.node_deps.get(self.unique_id(), [])
 names = []
 return (nodes, names)
# matches the object-file suffix, replaced by '.d' to find the deps file
re_o = re.compile("\.o$")
# matches paths starting with '..' (files relative to the source dir)
re_src = re.compile("^(\.\.)[\\/](.*)$")
def post_run(self):
 # Parse the .d file written by gcc -MD next to the object file and
 # record the discovered header dependencies in bld.node_deps.
 # The following code is executed by threads, it is not safe, so a lock is needed...
 if getattr(self, 'cached', None):
  return Task.Task.post_run(self)
 # derive the .d file name from the first output (.o -> .d)
 name = self.outputs[0].abspath(self.env)
 name = re_o.sub('.d', name)
 txt = Utils.readf(name)
 #os.unlink(name)
 # make the makefile-style rule a single line, then keep everything
 # after the 'target:' prefix as a whitespace-separated path list
 txt = txt.replace('\\\n', '')
 lst = txt.strip().split(':')
 val = ":".join(lst[1:])
 val = val.split()
 nodes = []
 bld = self.generator.bld
 # matches paths under the build variant dir or the source dir
 f = re.compile("^("+self.env.variant()+"|\.\.)[\\/](.*)$")
 for x in val:
  if os.path.isabs(x):
   if not preproc.go_absolute:
    continue
   lock.acquire()
   try:
    node = bld.root.find_resource(x)
   finally:
    lock.release()
  else:
   # relative path: resolve against the source dir ('../...') or
   # against the variant build dir
   g = re.search(re_src, x)
   if g:
    x = g.group(2)
    lock.acquire()
    try:
     node = bld.bldnode.parent.find_resource(x)
    finally:
     lock.release()
   else:
    g = re.search(f, x)
    if g:
     x = g.group(2)
     lock.acquire()
     try:
      node = bld.srcnode.find_resource(x)
     finally:
      lock.release()
  if id(node) == id(self.inputs[0]):
   # ignore the source file, it is already in the dependencies
   # this way, successful config tests may be retrieved from the cache
   continue
  if not node:
   raise ValueError('could not find %r for %r' % (x, self))
  else:
   nodes.append(node)
 Logs.debug('deps: real scanner for %s returned %s' % (str(self), str(nodes)))
 bld.node_deps[self.unique_id()] = nodes
 bld.raw_deps[self.unique_id()] = []
 # drop the cached signature so the new deps are taken into account
 try:
  del self.cache_sig
 except:
  pass
 Task.Task.post_run(self)
import Constants, Utils
def sig_implicit_deps(self):
 # A stale .d reference (deleted header) raises WafError; returning
 # SIG_NIL forces the task to rebuild instead of aborting.
 try:
  return Task.Task.sig_implicit_deps(self)
 except Utils.WafError:
  return Constants.SIG_NIL
# Install the gcc-deps behaviour on the cc and cxx task classes, if present.
for name in 'cc cxx'.split():
 try:
  cls = Task.TaskBase.classes[name]
 except KeyError:
  pass
 else:
  cls.post_run = post_run
  cls.scan = scan
  cls.sig_implicit_deps = sig_implicit_deps
ntdb-1.0/buildtools/wafadmin/3rdparty/go.py 0000664 0000000 0000000 00000006621 12241515307 0021003 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# go.py - Waf tool for the Go programming language
# By: Tom Wambold
import platform, os
import Task
import Utils
from TaskGen import feature, extension, after
# Tasks wrapping the gc toolchain compiler, archiver and linker.
Task.simple_task_type('gocompile', '${GOC} ${GOCFLAGS} -o ${TGT} ${SRC}', shell=False)
Task.simple_task_type('gopack', '${GOP} grc ${TGT} ${SRC}', shell=False)
Task.simple_task_type('golink', '${GOL} ${GOLFLAGS} -o ${TGT} ${SRC}', shell=False)
def detect(conf):
 # Select the architecture-specific gc tools (6g/8g/5g era toolchain)
 # from $GOARCH, falling back to the host machine type.
 def set_def(var, val):
  if not conf.env[var]:
   conf.env[var] = val
 goarch = os.getenv("GOARCH")
 if goarch == '386':
  set_def('GO_PLATFORM', 'i386')
 elif goarch == 'amd64':
  set_def('GO_PLATFORM', 'x86_64')
 elif goarch == 'arm':
  set_def('GO_PLATFORM', 'arm')
 else:
  set_def('GO_PLATFORM', platform.machine())
 if conf.env.GO_PLATFORM == 'x86_64':
  set_def('GO_COMPILER', '6g')
  set_def('GO_LINKER', '6l')
  set_def('GO_EXTENSION', '.6')
 elif conf.env.GO_PLATFORM in ['i386', 'i486', 'i586', 'i686']:
  set_def('GO_COMPILER', '8g')
  set_def('GO_LINKER', '8l')
  set_def('GO_EXTENSION', '.8')
 elif conf.env.GO_PLATFORM == 'arm':
  set_def('GO_COMPILER', '5g')
  set_def('GO_LINKER', '5l')
  set_def('GO_EXTENSION', '.5')
 # None of the branches above matched: the platform is unsupported.
 if not (conf.env.GO_COMPILER or conf.env.GO_LINKER or conf.env.GO_EXTENSION):
  raise conf.fatal('Unsupported platform ' + platform.machine())
 set_def('GO_PACK', 'gopack')
 set_def('GO_PACK_EXTENSION', '.a')
 conf.find_program(conf.env.GO_COMPILER, var='GOC', mandatory=True)
 conf.find_program(conf.env.GO_LINKER, var='GOL', mandatory=True)
 conf.find_program(conf.env.GO_PACK, var='GOP', mandatory=True)
 conf.find_program('cgo', var='CGO', mandatory=True)
@extension('.go')
def compile_go(self, node):
 """Accumulate each .go source node on the task generator; the list is
 consumed later by apply_compile_go()."""
 if hasattr(self, 'go_nodes'):
  self.go_nodes.append(node)
 else:
  self.go_nodes = [node]
@feature('go')
@after('apply_core')
def apply_compile_go(self):
 # Create one gocompile task for all collected .go sources, producing a
 # single object with the platform extension; None when there are no
 # sources, so downstream methods can bail out.
 try:
  nodes = self.go_nodes
 except AttributeError:
  self.go_compile_task = None
 else:
  self.go_compile_task = self.create_task('gocompile',
   nodes,
   [self.path.find_or_declare(self.target + self.env.GO_EXTENSION)])
@feature('gopackage', 'goprogram')
@after('apply_compile_go')
def apply_goinc(self):
 """Resolve the local packages named in 'uselib_local': order this
 generator's compile task after their pack tasks and add the matching
 -I / -L flags."""
 if not getattr(self, 'go_compile_task', None):
  return
 names = self.to_list(getattr(self, 'uselib_local', []))
 for name in names:
  obj = self.name_to_obj(name)
  if not obj:
   # BUG FIX: the original interpolated an undefined variable
   # 'lib_name' here, so a missing dependency raised NameError
   # instead of the intended WafError message.
   raise Utils.WafError('object %r was not found in uselib_local '
    '(required by %r)' % (name, self.name))
  obj.post()
  self.go_compile_task.set_run_after(obj.go_package_task)
  self.go_compile_task.dep_nodes.extend(obj.go_package_task.outputs)
  self.env.append_unique('GOCFLAGS', '-I' + obj.path.abspath(obj.env))
  self.env.append_unique('GOLFLAGS', '-L' + obj.path.abspath(obj.env))
@feature('gopackage')
@after('apply_goinc')
def apply_gopackage(self):
 # Archive the compiled object into a .a package with gopack, ordered
 # after the compile task.
 self.go_package_task = self.create_task('gopack',
  self.go_compile_task.outputs[0],
  self.path.find_or_declare(self.target + self.env.GO_PACK_EXTENSION))
 self.go_package_task.set_run_after(self.go_compile_task)
 self.go_package_task.dep_nodes.extend(self.go_compile_task.outputs)
@feature('goprogram')
@after('apply_goinc')
def apply_golink(self):
 # Link the compiled object into an executable, ordered after the
 # compile task.
 self.go_link_task = self.create_task('golink',
  self.go_compile_task.outputs[0],
  self.path.find_or_declare(self.target))
 self.go_link_task.set_run_after(self.go_compile_task)
 self.go_link_task.dep_nodes.extend(self.go_compile_task.outputs)
ntdb-1.0/buildtools/wafadmin/3rdparty/lru_cache.py 0000664 0000000 0000000 00000004564 12241515307 0022327 0 ustar 00root root 0000000 0000000 #! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy 2011
import os, shutil, re
import Options, Build, Logs
"""
Apply a least recently used policy to the Waf cache.
For performance reasons, it is called after the build is complete.
We assume that the folders are written atomically
Do export WAFCACHE=/tmp/foo-xyz where xyz represents the cache size in megabytes
If missing, the default cache size will be set to 10GB
"""
# extracts the trailing number from the WAFCACHE directory name
re_num = re.compile('[a-zA-Z_]+(\d+)')
# default cache budget, overridden by the number in the WAFCACHE name
CACHESIZE = 10*1024*1024*1024 # in bytes
# trim until the cache is below CACHESIZE * CLEANRATIO
CLEANRATIO = 0.8
# assumed on-disk overhead per directory entry
DIRSIZE = 4096
def compile(self):
 """Wrapper for BuildContext.compile: make sure the global cache
 directory exists, run the real build, then trim the cache (sweep)
 even when the build fails."""
 if Options.cache_global and not Options.options.nocache:
  try:
   os.makedirs(Options.cache_global)
  except OSError:
   # BUG FIX: narrowed the original bare 'except:' — only the
   # expected "already exists / cannot create" error should be
   # ignored, not KeyboardInterrupt or programming errors.
   pass
 try:
  self.raw_compile()
 finally:
  if Options.cache_global and not Options.options.nocache:
   self.sweep()
def sweep(self):
 # Apply the LRU policy: measure every cache entry, and when the total
 # exceeds CACHESIZE delete the oldest entries until the total is below
 # CACHESIZE * CLEANRATIO.
 global CACHESIZE
 CACHEDIR = Options.cache_global
 # get the cache max size from the WAFCACHE filename
 re_num = re.compile('[a-zA-Z_]+(\d+)')
 val = re_num.sub('\\1', os.path.basename(Options.cache_global))
 try:
  CACHESIZE = int(val)
 except:
  pass
 # map folder names to timestamps
 flist = {}
 for x in os.listdir(CACHEDIR):
  j = os.path.join(CACHEDIR, x)
  if os.path.isdir(j) and len(x) == 32: # dir names are md5 hexdigests
   flist[x] = [os.stat(j).st_mtime, 0]
 # compute the size of each entry (files + directory overhead)
 for (x, v) in flist.items():
  cnt = DIRSIZE # each entry takes 4kB
  d = os.path.join(CACHEDIR, x)
  for k in os.listdir(d):
   cnt += os.stat(os.path.join(d, k)).st_size
  flist[x][1] = cnt
 total = sum([x[1] for x in flist.values()])
 Logs.debug('lru: Cache size is %r' % total)
 if total >= CACHESIZE:
  Logs.debug('lru: Trimming the cache since %r > %r' % (total, CACHESIZE))
  # make a list to sort the folders by timestamp
  lst = [(p, v[0], v[1]) for (p, v) in flist.items()]
  lst.sort(key=lambda x: x[1]) # sort by timestamp
  lst.reverse()
  while total >= CACHESIZE * CLEANRATIO:
   (k, t, s) = lst.pop()
   p = os.path.join(CACHEDIR, k)
   v = p + '.del'
   # rename first so a concurrent waf process cannot pick the entry up
   # while it is being deleted
   try:
    os.rename(p, v)
   except:
    # someone already did it
    pass
   else:
    try:
     shutil.rmtree(v)
    except:
     # this should not happen, but who knows?
     Logs.warn('If you ever see this message, report it (%r)' % v)
    total -= s
    del flist[k]
  Logs.debug('lru: Total at the end %r' % total)
# Monkey-patch BuildContext: keep the original compile as raw_compile and
# replace it with the cache-sweeping wrapper defined above.
Build.BuildContext.raw_compile = Build.BuildContext.compile
Build.BuildContext.compile = compile
Build.BuildContext.sweep = sweep
ntdb-1.0/buildtools/wafadmin/3rdparty/paranoid.py 0000664 0000000 0000000 00000001576 12241515307 0022177 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# ita 2010
import Logs, Utils, Build, Task
def say(txt):
 # Fallback reporter: plain warning line.
 Logs.warn("^o^: %s" % txt)
# When cowsay is installed, replace say() with a cowsay-based printer.
try:
 ret = Utils.cmd_output('which cowsay 2> /dev/null').strip()
except Exception, e:
 pass
else:
 def say(txt):
  f = Utils.cmd_output([ret, txt])
  Utils.pprint('PINK', f)
say('you make the errors, we detect them')
def check_task_classes(self):
 """Warn about registered task classes that declare no precedence
 constraints (neither ext_in/before nor ext_out/after).

 BUG FIXES vs the original:
 * Task.TaskBase.classes maps names to classes; the original iterated
   the keys (strings), ran isinstance() on them, then read an
   undefined variable 'cls' — so the checks never worked.
 * The conditions used 'not a or b' where 'not (a or b)' was meant.
 * The warning strings contained %s but were never interpolated.
 """
 for name, cls in Task.TaskBase.classes.items():
  if not (getattr(cls, 'ext_in', None) or getattr(cls, 'before', None)):
   say('class %s has no precedence constraints (ext_in/before)' % name)
  if not (getattr(cls, 'ext_out', None) or getattr(cls, 'after', None)):
   say('class %s has no precedence constraints (ext_out/after)' % name)
# Wrap BuildContext.compile to run the task-class sanity checks once
# before the first build (unless self.magic is set to skip them).
comp = Build.BuildContext.compile
def compile(self):
 if not getattr(self, 'magic', None):
  check_task_classes(self)
 return comp(self)
Build.BuildContext.compile = compile
ntdb-1.0/buildtools/wafadmin/3rdparty/swig.py 0000664 0000000 0000000 00000011670 12241515307 0021347 0 ustar 00root root 0000000 0000000 #! /usr/bin/env python
# encoding: UTF-8
# Petar Forai
# Thomas Nagy 2008
import re
import Task, Utils, Logs
from TaskGen import extension
from Configure import conf
import preproc
"""
Welcome in the hell of adding tasks dynamically
swig interface files may be created at runtime, the module name may be unknown in advance
rev 5859 is much more simple
"""
# File extensions handled by the swig task generator method i_file().
SWIG_EXTS = ['.swig', '.i']
# Command line template for the swig task (reuses the C/C++ include and
# define flags so interface files resolve the same headers).
swig_str = '${SWIG} ${SWIGFLAGS} ${_CCINCFLAGS} ${_CXXINCFLAGS} ${_CCDEFFLAGS} ${_CXXDEFFLAGS} ${SRC}'
cls = Task.simple_task_type('swig', swig_str, color='BLUE', ext_in='.i .h', ext_out='.o .c .cxx', shell=False)
def runnable_status(self):
 # Custom status hook: before the swig task can run, discover the
 # %module name from the interface file, create the wrapper C/C++
 # compile task (swig_c) and register language-specific outputs.
 for t in self.run_after:
  if not t.hasrun:
   # NOTE(review): ASK_LATER is not imported in this module; upstream
   # waf gets it from Constants — confirm this is not a NameError here.
   return ASK_LATER
 if not getattr(self, 'init_outputs', None):
  self.init_outputs = True
  if not getattr(self, 'module', None):
   # search the module name
   txt = self.inputs[0].read(self.env)
   m = re_module.search(txt)
   if not m:
    raise ValueError("could not find the swig module name")
   self.module = m.group(1)
  swig_c(self)
  # add the language-specific output files as nodes
  # call funs in the dict swig_langs
  for x in self.env['SWIGFLAGS']:
   # obtain the language
   x = x[1:]
   try:
    fun = swig_langs[x]
   except KeyError:
    pass
   else:
    fun(self)
 return Task.Task.runnable_status(self)
setattr(cls, 'runnable_status', runnable_status)
# %module declaration (with optional options in parentheses)
re_module = re.compile('%module(?:\s*\(.*\))?\s+(.+)', re.M)
re_1 = re.compile(r'^%module.*?\s+([\w]+)\s*?$', re.M)
# %include / #include directives used for dependency scanning
re_2 = re.compile('%include "(.*)"', re.M)
re_3 = re.compile('#include "(.*)"', re.M)
def scan(self):
 "scan for swig dependencies, climb the .i files"
 # Breadth-first traversal of %include / #include references starting
 # from the main interface file; every reachable node is a dependency.
 env = self.env
 lst_src = []
 seen = []
 to_see = [self.inputs[0]]
 while to_see:
  node = to_see.pop(0)
  if node.id in seen:
   continue
  seen.append(node.id)
  lst_src.append(node)
  # read the file
  code = node.read(env)
  code = preproc.re_nl.sub('', code)
  code = preproc.re_cpp.sub(preproc.repl, code)
  # find .i files and project headers
  names = re_2.findall(code) + re_3.findall(code)
  for n in names:
   for d in self.generator.env.INC_PATHS + [node.parent]:
    u = d.find_resource(n)
    if u:
     to_see.append(u)
     break
   else:
    Logs.warn('could not find %r' % n)
 # list of nodes this one depends on, and module name if present
 if Logs.verbose:
  Logs.debug('deps: deps for %s: %s' % (str(self), str(lst_src)))
 return (lst_src, [])
cls.scan = scan
# provide additional language processing
swig_langs = {}
def swig(fun):
 """Decorator registering *fun* as the post-processing hook for one swig
 target language; the language key is the function name without its
 'swig_' prefix (e.g. swig_python -> 'python')."""
 lang = fun.__name__.replace('swig_', '')
 swig_langs[lang] = fun
def swig_c(self):
 # Create the compile task for the C/C++ wrapper that swig will emit,
 # inject it into the scheduler, and make the link task pick up its
 # object file.
 ext = '.swigwrap_%d.c' % self.generator.idx
 flags = self.env['SWIGFLAGS']
 if '-c++' in flags:
  # C++ mode: the wrapper gets a .cxx extension
  ext += 'xx'
 out_node = self.inputs[0].parent.find_or_declare(self.module + ext)
 try:
  if '-c++' in flags:
   fun = self.generator.cxx_hook
  else:
   fun = self.generator.c_hook
 except AttributeError:
  raise Utils.WafError('No c%s compiler was found to process swig files' % ('-c++' in flags and '++' or ''))
 task = fun(out_node)
 task.set_run_after(self)
 # the build has already started: insert the new task manually
 ge = self.generator.bld.generator
 ge.outstanding.insert(0, task)
 ge.total += 1
 try:
  ltask = self.generator.link_task
 except AttributeError:
  pass
 else:
  ltask.inputs.append(task.outputs[0])
 self.outputs.append(out_node)
 # make swig write the wrapper to the node we declared
 if not '-o' in self.env['SWIGFLAGS']:
  self.env.append_value('SWIGFLAGS', '-o')
  self.env.append_value('SWIGFLAGS', self.outputs[0].abspath(self.env))
@swig
def swig_python(tsk):
 # Python target: swig also emits a <module>.py wrapper.
 tsk.set_outputs(tsk.inputs[0].parent.find_or_declare(tsk.module + '.py'))
@swig
def swig_ocaml(tsk):
 # OCaml target: swig emits both .ml and .mli files.
 tsk.set_outputs(tsk.inputs[0].parent.find_or_declare(tsk.module + '.ml'))
 tsk.set_outputs(tsk.inputs[0].parent.find_or_declare(tsk.module + '.mli'))
@extension(SWIG_EXTS)
def i_file(self, node):
 # Create a swig task for each .swig/.i source and propagate the
 # generator's swig_module / swig_flags attributes onto it.
 # the task instance
 tsk = self.create_task('swig')
 tsk.set_inputs(node)
 tsk.module = getattr(self, 'swig_module', None)
 flags = self.to_list(getattr(self, 'swig_flags', []))
 self.env.append_value('SWIGFLAGS', flags)
 # NOTE(review): -outdir is appended to the local 'flags' list after
 # append_value() above — verify the extra flags actually reach
 # env['SWIGFLAGS'] (they only do if append_value keeps a reference).
 if not '-outdir' in flags:
  flags.append('-outdir')
  flags.append(node.parent.abspath(self.env))
@conf
def check_swig_version(conf, minver=None):
 """Check for a minimum swig version like conf.check_swig_version('1.3.28')
 or conf.check_swig_version((1,3,28)).

 Returns True when no minimum is requested or the installed version is
 at least *minver*; stores the detected version in env['SWIG_VERSION']
 on success."""
 reg_swig = re.compile(r'SWIG Version\s(.*)', re.M)
 swig_out = Utils.cmd_output('%s -version' % conf.env['SWIG'])
 swigver = [int(s) for s in reg_swig.findall(swig_out)[0].split('.')]
 # normalize minver ('1.3.28' or (1,3,28)) to a list of ints
 if isinstance(minver, basestring):
  minver = [int(s) for s in minver.split(".")]
 if isinstance(minver, tuple):
  minver = [int(s) for s in minver]
 result = (minver is None) or (minver[:3] <= swigver[:3])
 swigver_full = '.'.join(map(str, swigver))
 if result:
  conf.env['SWIG_VERSION'] = swigver_full
 if minver is None:
  conf.check_message_custom('swig version', '', swigver_full)
 else:
  # BUG FIX: the original computed minver_str unconditionally, so
  # calling without a minimum version raised
  # TypeError: '.'.join(map(str, None)).
  minver_str = '.'.join(map(str, minver))
  conf.check_message('swig version', '>= %s' % (minver_str,), result, option=swigver_full)
 return result
def detect(conf):
 # The swig binary is required for this tool to configure.
 swig = conf.find_program('swig', var='SWIG', mandatory=True)
ntdb-1.0/buildtools/wafadmin/3rdparty/valadoc.py 0000664 0000000 0000000 00000007166 12241515307 0022014 0 ustar 00root root 0000000 0000000 #! /usr/bin/env python
# encoding: UTF-8
# Nicolas Joseph 2009
from fnmatch import fnmatchcase
import os, os.path, re, stat
import Task, Utils, Node, Constants
from TaskGen import feature, extension, after
from Logs import debug, warn, error
VALADOC_STR = '${VALADOC}'
class valadoc_task(Task.Task):
 """Task that builds the valadoc command line from its attributes and
 runs it to generate API documentation. The attributes below are class
 defaults; process_valadoc() overrides them per instance."""
 vars = ['VALADOC', 'VALADOCFLAGS']
 color = 'BLUE'
 after = 'cxx_link cc_link'
 quiet = True
 output_dir = ''
 doclet = ''
 package_name = ''
 package_version = ''
 files = []
 protected = True
 private = False
 inherit = False
 deps = False
 enable_non_null_experimental = False
 force = False
 def runnable_status(self):
  # always regenerate the documentation
  return True
 def run(self):
  if self.env['VALADOC']:
   if not self.env['VALADOCFLAGS']:
    self.env['VALADOCFLAGS'] = ''
   cmd = [Utils.subst_vars(VALADOC_STR, self.env)]
   cmd.append ('-o %s' % self.output_dir)
   if getattr(self, 'doclet', None):
    cmd.append ('--doclet %s' % self.doclet)
   cmd.append ('--package-name %s' % self.package_name)
   # BUG FIX: the original checked getattr(self, 'version', None) but
   # process_valadoc() sets 'package_version', so the option was never
   # added to the command line.
   if getattr(self, 'package_version', None):
    cmd.append ('--package-version %s' % self.package_version)
   if getattr(self, 'packages', None):
    for package in self.packages:
     cmd.append ('--pkg %s' % package)
   if getattr(self, 'vapi_dirs', None):
    for vapi_dir in self.vapi_dirs:
     cmd.append ('--vapidir %s' % vapi_dir)
   if not getattr(self, 'protected', None):
    cmd.append ('--no-protected')
   if getattr(self, 'private', None):
    cmd.append ('--private')
   if getattr(self, 'inherit', None):
    cmd.append ('--inherit')
   if getattr(self, 'deps', None):
    cmd.append ('--deps')
   if getattr(self, 'enable_non_null_experimental', None):
    cmd.append ('--enable-non-null-experimental')
   if getattr(self, 'force', None):
    cmd.append ('--force')
   cmd.append (' '.join ([x.relpath_gen (self.generator.bld.bldnode) for x in self.files]))
   return self.generator.bld.exec_command(' '.join(cmd))
  else:
   error ('You must install valadoc for generate the API documentation')
   return -1
@feature('valadoc')
def process_valadoc(self):
 """Create (or reuse) the valadoc task for this generator and copy the
 generator's attributes onto it; output_dir, doclet, package_name and
 files are mandatory."""
 task = getattr(self, 'task', None)
 if not task:
  task = self.create_task('valadoc')
  self.task = task
 # BUG FIX: the original merely constructed Utils.WafError(...) in each
 # error branch without raising it, so missing mandatory attributes
 # were silently ignored.
 if getattr(self, 'output_dir', None):
  task.output_dir = self.output_dir
 else:
  raise Utils.WafError('no output directory')
 if getattr(self, 'doclet', None):
  task.doclet = self.doclet
 else:
  raise Utils.WafError('no doclet directory')
 if getattr(self, 'package_name', None):
  task.package_name = self.package_name
 else:
  raise Utils.WafError('no package name')
 if getattr(self, 'package_version', None):
  task.package_version = self.package_version
 if getattr(self, 'packages', None):
  task.packages = Utils.to_list(self.packages)
 if getattr(self, 'vapi_dirs', None):
  task.vapi_dirs = Utils.to_list(self.vapi_dirs)
 if getattr(self, 'files', None):
  task.files = self.files
 else:
  raise Utils.WafError('no input file')
 if getattr(self, 'protected', None):
  task.protected = self.protected
 if getattr(self, 'private', None):
  task.private = self.private
 if getattr(self, 'inherit', None):
  task.inherit = self.inherit
 if getattr(self, 'deps', None):
  task.deps = self.deps
 if getattr(self, 'enable_non_null_experimental', None):
  task.enable_non_null_experimental = self.enable_non_null_experimental
 if getattr(self, 'force', None):
  task.force = self.force
def detect(conf):
 # valadoc is optional: the run() method reports its absence at build time.
 conf.find_program('valadoc', var='VALADOC', mandatory=False)
ntdb-1.0/buildtools/wafadmin/Build.py 0000664 0000000 0000000 00000067362 12241515307 0017676 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
"""
Dependency tree holder
The class Build holds all the info related to a build:
* file system representation (tree of Node instances)
* various cached objects (task signatures, file scan results, ..)
There is only one Build object at a time (bld singleton)
"""
import os, sys, errno, re, glob, gc, datetime, shutil
try: import cPickle
except: import pickle as cPickle
import Runner, TaskGen, Node, Scripting, Utils, Environment, Task, Logs, Options
from Logs import debug, error, info
from Constants import *
# Attributes of BuildContext persisted to / restored from the pickle cache.
SAVED_ATTRS = 'root srcnode bldnode node_sigs node_deps raw_deps task_sigs id_nodes'.split()
"Build class members to save"
# Module-level reference to the current BuildContext instance.
bld = None
"singleton - safe to use when Waf is not used as a library"
class BuildError(Utils.WafError):
 # Raised when one or more tasks fail; aggregates their error messages.
 # NOTE(review): the mutable default t=[] is shared across calls; it is
 # only read here, but callers should not mutate self.tasks in place.
 def __init__(self, b=None, t=[]):
  self.bld = b
  self.tasks = t
  self.ret = 1
  Utils.WafError.__init__(self, self.format_error())
 def format_error(self):
  # Join the per-task error texts; use newlines when there is more
  # than one failing task.
  lst = ['Build failed:']
  for tsk in self.tasks:
   txt = tsk.format_error()
   if txt: lst.append(txt)
  sep = ' '
  if len(lst) > 2:
   sep = '\n'
  return sep.join(lst)
def group_method(fun):
 """
 sets a build context method to execute after the current group has finished executing
 this is useful for installing build files:
 * calling install_files/install_as will fail if called too early
 * people do not want to define install method in their task classes
 TODO: try it
 """
 def f(*k, **kw):
  # k[0] is the BuildContext; do nothing unless an install is running
  if not k[0].is_install:
   return False
  # postpone=True (default): queue the call as a post-function of the
  # current task group instead of running it immediately
  postpone = True
  if 'postpone' in kw:
   postpone = kw['postpone']
   del kw['postpone']
  # TODO waf 1.6 in theory there should be no reference to the TaskManager internals here
  if postpone:
   m = k[0].task_manager
   if not m.groups: m.add_group()
   m.groups[m.current_group].post_funs.append((fun, k, kw))
   # remember the current script dir so relative paths still resolve
   # when the postponed function finally runs
   if not 'cwd' in kw:
    kw['cwd'] = k[0].path
  else:
   fun(*k, **kw)
 return f
class BuildContext(Utils.Context):
"holds the dependency tree"
def __init__(self):
# not a singleton, but provided for compatibility
global bld
bld = self
self.task_manager = Task.TaskManager()
# instead of hashing the nodes, we assign them a unique id when they are created
self.id_nodes = 0
self.idx = {}
# map names to environments, the 'default' must be defined
self.all_envs = {}
# ======================================= #
# code for reading the scripts
# project build directory - do not reset() from load_dirs()
self.bdir = ''
# the current directory from which the code is run
# the folder changes everytime a wscript is read
self.path = None
# Manual dependencies.
self.deps_man = Utils.DefaultDict(list)
# ======================================= #
# cache variables
# local cache for absolute paths - cache_node_abspath[variant][node]
self.cache_node_abspath = {}
# list of folders that are already scanned
# so that we do not need to stat them one more time
self.cache_scanned_folders = {}
# list of targets to uninstall for removing the empty folders after uninstalling
self.uninstall = []
# ======================================= #
# tasks and objects
# build dir variants (release, debug, ..)
for v in 'cache_node_abspath task_sigs node_deps raw_deps node_sigs'.split():
var = {}
setattr(self, v, var)
self.cache_dir_contents = {}
self.all_task_gen = []
self.task_gen_cache_names = {}
self.cache_sig_vars = {}
self.log = None
self.root = None
self.srcnode = None
self.bldnode = None
# bind the build context to the nodes in use
# this means better encapsulation and no build context singleton
class node_class(Node.Node):
pass
self.node_class = node_class
self.node_class.__module__ = "Node"
self.node_class.__name__ = "Nodu"
self.node_class.bld = self
self.is_install = None
def __copy__(self):
"nodes are not supposed to be copied"
raise Utils.WafError('build contexts are not supposed to be cloned')
def load(self):
"load the cache from the disk"
try:
env = Environment.Environment(os.path.join(self.cachedir, 'build.config.py'))
except (IOError, OSError):
pass
else:
if env['version'] < HEXVERSION:
raise Utils.WafError('Version mismatch! reconfigure the project')
for t in env['tools']:
self.setup(**t)
try:
gc.disable()
f = data = None
Node.Nodu = self.node_class
try:
f = open(os.path.join(self.bdir, DBFILE), 'rb')
except (IOError, EOFError):
# handle missing file/empty file
pass
try:
if f: data = cPickle.load(f)
except AttributeError:
# handle file of an old Waf version
# that has an attribute which no longer exist
# (e.g. AttributeError: 'module' object has no attribute 'BuildDTO')
if Logs.verbose > 1: raise
if data:
for x in SAVED_ATTRS: setattr(self, x, data[x])
else:
debug('build: Build cache loading failed')
finally:
if f: f.close()
gc.enable()
def save(self):
    """Store the cache on disk, see self.load.

    The data is pickled into a temporary file which is then renamed over
    DBFILE, so an interrupted save never leaves a corrupt cache behind.
    """
    gc.disable()  # speeds up pickling
    self.root.__class__.bld = None  # do not pickle the build context itself
    # some people are very nervous with ctrl+c so we have to make a temporary file
    Node.Nodu = self.node_class
    db = os.path.join(self.bdir, DBFILE)
    file = open(db + '.tmp', 'wb')
    data = {}
    for x in SAVED_ATTRS: data[x] = getattr(self, x)
    cPickle.dump(data, file, -1)
    file.close()
    # do not use shutil.move
    try: os.unlink(db)
    except OSError: pass
    os.rename(db + '.tmp', db)
    self.root.__class__.bld = self
    gc.enable()
# ======================================= #
def clean(self):
    """Remove the build files and forget their signatures.

    Files recorded in env[CFG_FILES] (created at configuration time) are
    preserved; every other BUILD node below srcnode is deleted for all
    variants, then the signature/dependency caches are reset.
    """
    debug('build: clean called')
    # does not clean files created during the configuration
    precious = set([])
    for env in self.all_envs.values():
        for x in env[CFG_FILES]:
            node = self.srcnode.find_resource(x)
            if node:
                precious.add(node.id)
    def clean_rec(node):
        # depth-first walk; directory nodes are kept, build files removed
        for x in list(node.childs.keys()):
            nd = node.childs[x]
            tp = nd.id & 3  # the two low bits of a node id encode its kind
            if tp == Node.DIR:
                clean_rec(nd)
            elif tp == Node.BUILD:
                if nd.id in precious: continue
                for env in self.all_envs.values():
                    try: os.remove(nd.abspath(env))
                    except OSError: pass
                node.childs.__delitem__(x)
    clean_rec(self.srcnode)
    for v in 'node_sigs node_deps task_sigs raw_deps cache_node_abspath'.split():
        setattr(self, v, {})
def compile(self):
    """Create the tasks (flush) and execute them in parallel.

    The cache file is not written if nothing was built at all (build is
    up to date).  The cache IS saved on KeyboardInterrupt and on normal
    completion, but not after an unexpected exception.
    """
    debug('build: compile called')
    """
    import cProfile, pstats
    cProfile.run("import Build\nBuild.bld.flush()", 'profi.txt')
    p = pstats.Stats('profi.txt')
    p.sort_stats('cumulative').print_stats(80)
    """
    self.flush()
    #"""
    self.generator = Runner.Parallel(self, Options.options.jobs)
    def dw(on=True):
        # toggle the terminal cursor while the progress bar is shown
        if Options.options.progress_bar:
            if on: sys.stderr.write(Logs.colors.cursor_on)
            else: sys.stderr.write(Logs.colors.cursor_off)
    debug('build: executor starting')
    back = os.getcwd()
    os.chdir(self.bldnode.abspath())
    try:
        try:
            dw(on=False)
            self.generator.start()
        except KeyboardInterrupt:
            dw()
            # if self.generator.processed != 1: TODO
            self.save()
            raise
        except Exception:
            dw()
            # do not store anything, for something bad happened
            raise
        else:
            dw()
            #if self.generator.processed != 1: TODO
            self.save()
        if self.generator.error:
            raise BuildError(self, self.task_manager.tasks_done)
    finally:
        os.chdir(back)
def install(self):
    """Entry point for both install and uninstall (sign of self.is_install).

    After an uninstall, walk the recorded target paths bottom-up and
    remove the directories left empty.
    """
    debug('build: install called')
    self.flush()
    # remove empty folders after uninstalling
    if self.is_install < 0:
        lst = []
        for x in self.uninstall:
            dir = os.path.dirname(x)
            if not dir in lst: lst.append(dir)
        lst.sort()
        lst.reverse()
        nlst = []
        for y in lst:
            x = y
            while len(x) > 4:
                # collect every ancestor, but never attempt to rmdir
                # very short roots such as '/' or 'C:\'
                if not x in nlst: nlst.append(x)
                x = os.path.dirname(x)
        nlst.sort()
        nlst.reverse()  # deepest directories first
        for x in nlst:
            try: os.rmdir(x)
            except OSError: pass  # directory not empty: keep it
def new_task_gen(self, *k, **kw):
    """Create a task generator bound to this build context.

    Without positional arguments the base TaskGen.task_gen is used;
    otherwise k[0] names a registered task_gen subclass.
    """
    if self.task_gen_cache_names:
        self.task_gen_cache_names = {}  # the name index is now stale
    kw['bld'] = self
    if len(k) == 0:
        ret = TaskGen.task_gen(*k, **kw)
    else:
        cls_name = k[0]
        try: cls = TaskGen.task_gen.classes[cls_name]
        except KeyError: raise Utils.WscriptError('%s is not a valid task generator -> %s' %
            (cls_name, [x for x in TaskGen.task_gen.classes]))
        ret = cls(*k, **kw)
    return ret
def __call__(self, *k, **kw):
    """Shorthand: bld(...) creates a base task generator bound to this context."""
    if self.task_gen_cache_names:
        self.task_gen_cache_names = {}  # the name index is now stale
    kw['bld'] = self
    return TaskGen.task_gen(*k, **kw)
def load_envs(self):
    """Load all configuration environments from the cache directory.

    Each *.cache.py file becomes an entry of self.all_envs; the
    configuration files are re-hashed so their signatures are known.
    Raises Utils.WafError when the project was never configured.
    """
    try:
        lst = Utils.listdir(self.cachedir)
    except OSError, e:
        if e.errno == errno.ENOENT:
            raise Utils.WafError('The project was not configured: run "waf configure" first!')
        else:
            raise
    if not lst:
        raise Utils.WafError('The cache directory is empty: reconfigure the project')
    for file in lst:
        if file.endswith(CACHE_SUFFIX):
            env = Environment.Environment(os.path.join(self.cachedir, file))
            name = file[:-len(CACHE_SUFFIX)]
            self.all_envs[name] = env
    self.init_variants()
    for env in self.all_envs.values():
        for f in env[CFG_FILES]:
            newnode = self.path.find_or_declare(f)
            try:
                hash = Utils.h_file(newnode.abspath(env))
            except (IOError, AttributeError):
                error("cannot find "+f)
                hash = SIG_NIL
            self.node_sigs[env.variant()][newnode.id] = hash
    # TODO: hmmm, these nodes are removed from the tree when calling rescan()
    self.bldnode = self.root.find_dir(self.bldnode.abspath())
    self.path = self.srcnode = self.root.find_dir(self.srcnode.abspath())
    self.cwd = self.bldnode.abspath()
def setup(self, tool, tooldir=None, funs=None):
    """Import the given waf tool (or list of tools) for the build process
    and run each module's setup() hook when it defines one."""
    if isinstance(tool, list):
        # recurse once per tool name
        for single in tool:
            self.setup(single, tooldir)
        return
    if not tooldir:
        tooldir = Options.tooldir
    module = Utils.load_tool(tool, tooldir)
    if hasattr(module, "setup"):
        module.setup(self)
def init_variants(self):
    """Compute self.lst_variants (first-seen order, no duplicates) and
    make sure every per-variant signature cache exists."""
    debug('build: init variants')
    variants = []
    for env in self.all_envs.values():
        vname = env.variant()
        if vname not in variants:
            variants.append(vname)
    self.lst_variants = variants
    debug('build: list of variants is %r', variants)
    # the key 0 holds the source-file signatures (no variant)
    for key in variants + [0]:
        for attr in 'node_sigs cache_node_abspath'.split():
            table = getattr(self, attr)
            if key not in table:
                table[key] = {}
# ======================================= #
# node and folder handling
# this should be the main entry point
def load_dirs(self, srcdir, blddir, load_cache=1):
    """Initialize the node tree - the main entry point after configuration.

    srcdir and blddir must be absolute and must differ.  Loads the
    pickled cache if present, then creates the root/src/bld nodes and
    the build directory itself.  (load_cache is unused here; kept for
    backward compatibility.)
    """
    assert(os.path.isabs(srcdir))
    assert(os.path.isabs(blddir))
    self.cachedir = os.path.join(blddir, CACHE_DIR)
    if srcdir == blddir:
        raise Utils.WafError("build dir must be different from srcdir: %s <-> %s " % (srcdir, blddir))
    self.bdir = blddir
    # try to load the cache file, if it does not exist, nothing happens
    self.load()
    if not self.root:
        Node.Nodu = self.node_class
        self.root = Node.Nodu('', None, Node.DIR)
    if not self.srcnode:
        self.srcnode = self.root.ensure_dir_node_from_path(srcdir)
    debug('build: srcnode is %s and srcdir %s', self.srcnode.name, srcdir)
    self.path = self.srcnode
    # create this build dir if necessary
    try: os.makedirs(blddir)
    except OSError: pass
    if not self.bldnode:
        self.bldnode = self.root.ensure_dir_node_from_path(blddir)
    self.init_variants()
def rescan(self, src_dir_node):
    """
    look the contents of a (folder)node and update its list of childs

    The intent is to perform the following steps
    * remove the nodes for the files that have disappeared
    * remove the signatures for the build files that have disappeared
    * cache the results of os.listdir
    * create the build folder equivalent (mkdir) for each variant
    src/bar -> build/default/src/bar, build/release/src/bar

    when a folder in the source directory is removed, we do not check recursively
    to remove the unused nodes. To do that, call 'waf clean' and build again.
    """
    # do not rescan over and over again
    # TODO use a single variable in waf 1.6
    if self.cache_scanned_folders.get(src_dir_node.id, None): return
    self.cache_scanned_folders[src_dir_node.id] = True
    # TODO remove in waf 1.6
    if hasattr(self, 'repository'): self.repository(src_dir_node)
    if not src_dir_node.name and sys.platform == 'win32':
        # the root has no name, contains drive letters, and cannot be listed
        return
    # first, take the case of the source directory
    parent_path = src_dir_node.abspath()
    try:
        lst = set(Utils.listdir(parent_path))
    except OSError:
        lst = set([])
    # TODO move this at the bottom
    self.cache_dir_contents[src_dir_node.id] = lst
    # hash the existing source files, remove the others
    cache = self.node_sigs[0]  # key 0 == source-file signatures
    for x in src_dir_node.childs.values():
        if x.id & 3 != Node.FILE: continue
        if x.name in lst:
            try:
                cache[x.id] = Utils.h_file(x.abspath())
            except IOError:
                raise Utils.WafError('The file %s is not readable or has become a dir' % x.abspath())
        else:
            # the file has disappeared: drop its signature and its node
            try: del cache[x.id]
            except KeyError: pass
            del src_dir_node.childs[x.name]
    # first obtain the differences between srcnode and src_dir_node
    h1 = self.srcnode.height()
    h2 = src_dir_node.height()
    lst = []
    child = src_dir_node
    while h2 > h1:
        lst.append(child.name)
        child = child.parent
        h2 -= 1
    lst.reverse()  # path components from srcnode down to src_dir_node
    # list the files in the build dirs
    try:
        for variant in self.lst_variants:
            sub_path = os.path.join(self.bldnode.abspath(), variant , *lst)
            self.listdir_bld(src_dir_node, sub_path, variant)
    except OSError:
        # listdir failed, remove the build node signatures for all variants
        for node in src_dir_node.childs.values():
            if node.id & 3 != Node.BUILD:
                continue
            for dct in self.node_sigs.values():
                if node.id in dct:
                    dct.__delitem__(node.id)
            # the policy is to avoid removing nodes representing directories
            src_dir_node.childs.__delitem__(node.name)
        # create the missing build folder for each variant
        for variant in self.lst_variants:
            sub_path = os.path.join(self.bldnode.abspath(), variant , *lst)
            try:
                os.makedirs(sub_path)
            except OSError:
                pass
# ======================================= #
def listdir_src(self, parent_node):
    """Deprecated no-op; kept only so that old callers keep working."""
    return None
def remove_node(self, node):
    """Deprecated no-op; kept only so that old callers keep working."""
    return None
def listdir_bld(self, parent_node, path, variant):
    """Forget the cached signatures of build nodes whose files were
    deleted from the build directory (no timestamps are ever added here)."""
    build_children = [c for c in parent_node.childs.values() if c.id & 3 == Node.BUILD]
    present = set(Utils.listdir(path))
    vanished = set(c.name for c in build_children) - present
    sigs = self.node_sigs[variant]
    # drop the stamps of the build nodes that no longer exist on disk
    for child in build_children:
        if child.name in vanished and child.id in sigs:
            del sigs[child.id]
def get_env(self):
    "getter for the 'env' property: the environment named 'default'"
    return self.env_of_name('default')
def set_env(self, name, val):
    "register the environment 'val' under 'name'"
    # NOTE(review): as a property setter this would be invoked with a single
    # value and fail on the missing 'val' argument - presumably only direct
    # set_env(name, val) calls are made; verify against callers
    self.all_envs[name] = val
env = property(get_env, set_env)
def add_manual_dependency(self, path, value):
    """Attach an extra dependency 'value' to the file 'path'.

    path may be a Node, an absolute path, or a path relative to self.path.
    NOTE(review): when the file cannot be found the lookup returns None
    and the node.id access below raises AttributeError - callers must
    pass existing files.
    """
    if isinstance(path, Node.Node):
        node = path
    elif os.path.isabs(path):
        node = self.root.find_resource(path)
    else:
        node = self.path.find_resource(path)
    self.deps_man[node.id].append(value)
def launch_node(self):
    """Return the directory waf was launched from, as a node (cached in p_ln)."""
    # p_ln is kind of private, but public in case if
    try:
        return self.p_ln
    except AttributeError:
        self.p_ln = self.root.find_dir(Options.launch_dir)
        return self.p_ln
def glob(self, pattern, relative=True):
    """Return the files matching 'pattern', seen from the current folder.

    With relative=True the results are paths relative to self.path,
    otherwise absolute paths; matches without a node are dropped.
    """
    path = self.path.abspath()
    files = [self.root.find_resource(x) for x in glob.glob(path+os.sep+pattern)]
    if relative:
        files = [x.path_to_parent(self.path) for x in files if x]
    else:
        files = [x.abspath() for x in files if x]
    return files
## the following methods are candidates for the stable apis ##
def add_group(self, *k):
    "delegate to the task manager: open a new build group"
    self.task_manager.add_group(*k)
def set_group(self, *k, **kw):
    "delegate to the task manager: switch to an existing build group"
    self.task_manager.set_group(*k, **kw)
def hash_env_vars(self, env, vars_lst):
    """hash environment variables
    ['CXX', ..] -> [env['CXX'], ..] -> md5()

    Results are memoized per (environment identity, variable list).
    """
    # ccroot objects use the same environment for building the .o at once
    # the same environment and the same variables are used
    idx = str(id(env)) + str(vars_lst)
    try: return self.cache_sig_vars[idx]
    except KeyError: pass
    lst = [str(env[a]) for a in vars_lst]
    ret = Utils.h_list(lst)
    debug('envhash: %r %r', ret, lst)
    # next time
    self.cache_sig_vars[idx] = ret
    return ret
def name_to_obj(self, name, env):
    """retrieve a task generator from its name or its target name
    remember that names must be unique

    The lookup table is built lazily, keyed by '<variant>_<name-or-target>';
    an explicit name always wins, and for targets the first registered
    generator wins.
    """
    cache = self.task_gen_cache_names
    if not cache:
        # create the index lazily
        for x in self.all_task_gen:
            vt = x.env.variant() + '_'
            if x.name:
                cache[vt + x.name] = x
            else:
                if isinstance(x.target, str):
                    target = x.target
                else:
                    target = ' '.join(x.target)
                v = vt + target
                if not cache.get(v, None):
                    cache[v] = x
    return cache.get(env.variant() + '_' + name, None)
def flush(self, all=1):
    """tell the task generators to create the tasks

    When --targets was given, only the listed generators (plus everything
    in the groups before them) are posted; otherwise every generator
    below the launch directory is posted.  With all=1 an unknown target
    name is a fatal error.
    """
    self.ini = datetime.datetime.now()
    # force the initialization of the mapping name->object in flush
    # name_to_obj can be used in userland scripts, in that case beware of incomplete mapping
    self.task_gen_cache_names = {}
    self.name_to_obj('', self.env)
    debug('build: delayed operation TaskGen.flush() called')
    if Options.options.compile_targets:
        debug('task_gen: posting objects %r listed in compile_targets', Options.options.compile_targets)
        mana = self.task_manager
        to_post = []
        min_grp = 0
        # ensure the target names exist, fail before any post()
        target_objects = Utils.DefaultDict(list)
        for target_name in Options.options.compile_targets.split(','):
            # trim target_name (handle cases when the user added spaces to targets)
            target_name = target_name.strip()
            for env in self.all_envs.values():
                tg = self.name_to_obj(target_name, env)
                if tg:
                    target_objects[target_name].append(tg)
                    # remember the latest group holding a requested target
                    m = mana.group_idx(tg)
                    if m > min_grp:
                        min_grp = m
                        to_post = [tg]
                    elif m == min_grp:
                        to_post.append(tg)
            if not target_name in target_objects and all:
                raise Utils.WafError("target '%s' does not exist" % target_name)
        debug('group: Forcing up to group %s for target %s', mana.group_name(min_grp), Options.options.compile_targets)
        # post all the task generators in previous groups
        for i in xrange(len(mana.groups)):
            mana.current_group = i
            if i == min_grp:
                break
            g = mana.groups[i]
            debug('group: Forcing group %s', mana.group_name(g))
            for t in g.tasks_gen:
                debug('group: Posting %s', t.name or t.target)
                t.post()
        # then post the task generators listed in compile_targets in the last group
        for t in to_post:
            t.post()
    else:
        debug('task_gen: posting objects (normal)')
        ln = self.launch_node()
        # if the build is started from the build directory, do as if it was started from the top-level
        # for the pretty-printing (Node.py), the two lines below cannot be moved to Build::launch_node
        if ln.is_child_of(self.bldnode) or not ln.is_child_of(self.srcnode):
            ln = self.srcnode
        # if the project file is located under the source directory, build all targets by default
        # else 'waf configure build' does nothing
        proj_node = self.root.find_dir(os.path.split(Utils.g_module.root_path)[0])
        if proj_node.id != self.srcnode.id:
            ln = self.srcnode
        for i in xrange(len(self.task_manager.groups)):
            g = self.task_manager.groups[i]
            self.task_manager.current_group = i
            if Logs.verbose:
                groups = [x for x in self.task_manager.groups_names if id(self.task_manager.groups_names[x]) == id(g)]
                name = groups and groups[0] or 'unnamed'
                Logs.debug('group: group', name)
            for tg in g.tasks_gen:
                if not tg.path.is_child_of(ln):
                    continue
                if Logs.verbose:
                    Logs.debug('group: %s' % tg)
                tg.post()
def env_of_name(self, name):
    """Return the environment registered under 'name', or None (after
    logging an error) when it does not exist."""
    try:
        return self.all_envs[name]
    except KeyError:
        error('no such environment: '+name)
        return None
def progress_line(self, state, total, col1, col2):
    """Build one progress-bar line: '[ 12/100][ 12%][spinner][===>  ][eta]'.
    col1/col2 are the terminal color escape strings around highlights."""
    n = len(str(total))
    Utils.rot_idx += 1
    ind = Utils.rot_chr[Utils.rot_idx % 4]  # rotating spinner character
    ini = self.ini
    pc = (100.*state)/total
    eta = Utils.get_elapsed_time(ini)
    fs = "[%%%dd/%%%dd][%%s%%2d%%%%%%s][%s][" % (n, n, ind)
    left = fs % (state, total, col1, pc, col2)
    right = '][%s%s%s]' % (col1, eta, col2)
    # the color escapes take no visible columns, hence the 2*len corrections
    cols = Utils.get_term_cols() - len(left) - len(right) + 2*len(col1) + 2*len(col2)
    if cols < 7: cols = 7
    ratio = int((cols*state)/total) - 1
    bar = ('='*ratio+'>').ljust(cols)
    msg = Utils.indicator % (left, bar, right)
    return msg
# do_install performs the actual copy/removal; it is called by
# install_files and install_as below
def do_install(self, src, tgt, chmod=O644):
    """returns true if the file was effectively installed or uninstalled, false otherwise"""
    if self.is_install > 0:
        if not Options.options.force:
            # check if the file is already there to avoid a copy
            try:
                st1 = os.stat(tgt)
                st2 = os.stat(src)
            except OSError:
                pass
            else:
                # same size and identical timestamps -> make no copy
                if st1.st_mtime >= st2.st_mtime and st1.st_size == st2.st_size:
                    return False
        srclbl = src.replace(self.srcnode.abspath(None)+os.sep, '')
        info("* installing %s as %s" % (srclbl, tgt))
        # following is for shared libs and stale inodes (-_-)
        try: os.remove(tgt)
        except OSError: pass
        try:
            shutil.copy2(src, tgt)
            os.chmod(tgt, chmod)
        except IOError:
            try:
                os.stat(src)
            except (OSError, IOError):
                error('File %r does not exist' % src)
            raise Utils.WafError('Could not install the file %r' % tgt)
        return True
    elif self.is_install < 0:
        info("* uninstalling %s" % tgt)
        self.uninstall.append(tgt)
        try:
            os.remove(tgt)
        except OSError, e:
            if e.errno != errno.ENOENT:
                # warn once globally, list details only with -vv
                if not getattr(self, 'uninstall_error', None):
                    self.uninstall_error = True
                    Logs.warn('build: some files could not be uninstalled (retry with -vv to list them)')
                if Logs.verbose > 1:
                    Logs.warn('could not remove %s (error code %r)' % (e.filename, e.errno))
        return True
# strips a leading drive letter and path separators (used under destdir)
red = re.compile(r"^([A-Za-z]:)?[/\\\\]*")
def get_install_path(self, path, env=None):
    "installation path prefixed by the destdir, the variables like in '${PREFIX}/bin' are substituted"
    if not env: env = self.env
    destdir = env.get_destdir()
    path = path.replace('/', os.sep)
    destpath = Utils.subst_vars(path, env)
    if destdir:
        # strip drive letter / leading separators so the join stays under destdir
        destpath = os.path.join(destdir, self.red.sub('', destpath))
    return destpath
def install_dir(self, path, env=None):
    """
    create empty folders for the installation (very rarely used)
    """
    if env:
        assert isinstance(env, Environment.Environment), "invalid parameter"
    else:
        env = self.env
    if not path:
        return []
    destpath = self.get_install_path(path, env)
    if self.is_install > 0:
        info('* creating %s' % destpath)
        Utils.check_dir(destpath)
    elif self.is_install < 0:
        info('* removing %s' % destpath)
        # record a fake child so the empty-dir sweep in install() removes it
        self.uninstall.append(destpath + '/xxx') # yes, ugly
def install_files(self, path, files, env=None, chmod=O644, relative_trick=False, cwd=None):
    """To install files only after they have been built, put the calls in a method named
    post_build on the top-level wscript

    The files must be a list and contain paths as strings or as Nodes

    The relative_trick flag can be set to install folders, use bld.path.ant_glob() with it

    Returns the list of destination paths that were actually installed.
    """
    if env:
        assert isinstance(env, Environment.Environment), "invalid parameter"
    else:
        env = self.env
    if not path: return []
    if not cwd:
        cwd = self.path
    if isinstance(files, str) and '*' in files:
        # expand a wildcard pattern relative to cwd
        gl = cwd.abspath() + os.sep + files
        lst = glob.glob(gl)
    else:
        lst = Utils.to_list(files)
        if not getattr(lst, '__iter__', False):
            lst = [lst]
    destpath = self.get_install_path(path, env)
    Utils.check_dir(destpath)
    installed_files = []
    for filename in lst:
        if isinstance(filename, str) and os.path.isabs(filename):
            # absolute source: install under its basename
            alst = Utils.split_path(filename)
            destfile = os.path.join(destpath, alst[-1])
        else:
            if isinstance(filename, Node.Node):
                nd = filename
            else:
                nd = cwd.find_resource(filename)
            if not nd:
                raise Utils.WafError("Unable to install the file %r (not found in %s)" % (filename, cwd))
            if relative_trick:
                # keep the relative folder structure below destpath
                destfile = os.path.join(destpath, filename)
                Utils.check_dir(os.path.dirname(destfile))
            else:
                destfile = os.path.join(destpath, nd.name)
            filename = nd.abspath(env)
        if self.do_install(filename, destfile, chmod):
            installed_files.append(destfile)
    return installed_files
def install_as(self, path, srcfile, env=None, chmod=O644, cwd=None):
    """
    srcfile may be a string or a Node representing the file to install

    returns True if the file was effectively installed, False otherwise
    """
    if env:
        assert isinstance(env, Environment.Environment), "invalid parameter"
    else:
        env = self.env
    if not path:
        raise Utils.WafError("where do you want to install %r? (%r?)" % (srcfile, path))
    if not cwd:
        cwd = self.path
    destpath = self.get_install_path(path, env)
    dir, name = os.path.split(destpath)
    Utils.check_dir(dir)
    # the source path
    if isinstance(srcfile, Node.Node):
        src = srcfile.abspath(env)
    else:
        src = srcfile
        if not os.path.isabs(srcfile):
            # relative path: resolve it against cwd
            node = cwd.find_resource(srcfile)
            if not node:
                raise Utils.WafError("Unable to install the file %r (not found in %s)" % (srcfile, cwd))
            src = node.abspath(env)
    return self.do_install(src, destpath, chmod)
def symlink_as(self, path, src, env=None, cwd=None):
    """example: bld.symlink_as('${PREFIX}/lib/libfoo.so', 'libfoo.so.1.2.3')

    Returns 0 on success, 1 when an uninstall removal fails, and None on
    win32 where symlinks are not created at all.
    """
    if sys.platform == 'win32':
        # well, this *cannot* work
        return
    if not path:
        raise Utils.WafError("where do you want to install %r? (%r?)" % (src, path))
    tgt = self.get_install_path(path, env)
    dir, name = os.path.split(tgt)
    Utils.check_dir(dir)
    if self.is_install > 0:
        link = False
        # (re)create the link only when missing or pointing elsewhere
        if not os.path.islink(tgt):
            link = True
        elif os.readlink(tgt) != src:
            link = True
        if link:
            try: os.remove(tgt)
            except OSError: pass
            info('* symlink %s (-> %s)' % (tgt, src))
            os.symlink(src, tgt)
        return 0
    else: # UNINSTALL
        try:
            info('* removing %s' % (tgt))
            os.remove(tgt)
            return 0
        except OSError:
            return 1
def exec_command(self, cmd, **kw):
    """Execute a system command, logging it and defaulting the working
    directory to the build directory."""
    # 'runner' zone is printed out for waf -v, see wafadmin/Options.py
    debug('runner: system command -> %s', cmd)
    if self.log:
        self.log.write('%s\n' % cmd)
        kw['log'] = self.log
    try:
        if not kw.get('cwd', None):
            kw['cwd'] = self.cwd
    except AttributeError:
        # self.cwd is not set yet: fall back to the build directory
        self.cwd = kw['cwd'] = self.bldnode.abspath()
    return Utils.exec_command(cmd, **kw)
def printout(self, s):
    """Write the string 's' to the log file when one is open, otherwise
    to stderr, flushing immediately."""
    out = self.log or sys.stderr
    out.write(s)
    out.flush()
def add_subdirs(self, dirs):
    "recurse into the given subdirectories, executing their 'build' sections"
    self.recurse(dirs, 'build')
def pre_recurse(self, name_or_mod, path, nexdir):
    """Called before recursing into a wscript: push the current path and
    return the variables exposed to the script ('bld' and 'ctx')."""
    if not hasattr(self, 'oldpath'):
        self.oldpath = []
    self.oldpath.append(self.path)
    self.path = self.root.find_dir(nexdir)
    return {'bld': self, 'ctx': self}
def post_recurse(self, name_or_mod, path, nexdir):
    "restore the path saved by pre_recurse after leaving a wscript"
    self.path = self.oldpath.pop()
###### user-defined behaviour
def pre_build(self):
if hasattr(self, 'pre_funs'):
for m in self.pre_funs:
m(self)
def post_build(self):
    """Run the user callbacks registered with add_post_fun, if any."""
    for fun in getattr(self, 'post_funs', []):
        fun(self)
def add_pre_fun(self, meth):
    """Register 'meth' to be executed right before the build starts."""
    if not hasattr(self, 'pre_funs'):
        self.pre_funs = []
    self.pre_funs.append(meth)
def add_post_fun(self, meth):
    """Register 'meth' to be executed right after the build ends."""
    if not hasattr(self, 'post_funs'):
        self.post_funs = []
    self.post_funs.append(meth)
def use_the_magic(self):
    """Switch the scheduler to maximum parallelization and enable
    automatic file dependency extraction (experimental)."""
    Task.algotype = Task.MAXPARALLEL
    Task.file_deps = Task.extract_deps
    self.magic = True
# wrap the installation methods with group_method (presumably defined
# earlier in this file) so they take effect in the right build group
install_as = group_method(install_as)
install_files = group_method(install_files)
symlink_as = group_method(symlink_as)
ntdb-1.0/buildtools/wafadmin/Configure.py 0000664 0000000 0000000 00000027622 12241515307 0020553 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"""
Configuration system
A configuration instance is created when "waf configure" is called, it is used to:
* create data dictionaries (Environment instances)
* store the list of modules to import
The old model (copied from Scons) was to store logic (mapping file extensions to functions)
along with the data. In Waf a way was found to separate that logic by adding an indirection
layer (storing the names in the Environment instances)
In the new model, the logic is more object-oriented, and the user scripts provide the
logic. The data files (Environments) must contain configuration data only (flags, ..).
Note: the c/c++ related code is in the module config_c
"""
import os, shlex, sys, time
try: import cPickle
except ImportError: import pickle as cPickle
import Environment, Utils, Options, Logs
from Logs import warn
from Constants import *
# python2/python3 compatibility shim: expose 'urlopen' in both cases
# (used below to download waf tools from a remote repository)
try:
    from urllib import request
except:
    from urllib import urlopen
else:
    urlopen = request.urlopen

# header template written at the top of config.log
conf_template = '''# project %(app)s configured on %(now)s by
# waf %(wafver)s (abi %(abi)s, python %(pyver)x on %(systype)s)
# using %(args)s
#
'''
class ConfigurationError(Utils.WscriptError):
    """Raised (via conf.fatal) when a configuration step fails."""
    pass
# when enabled, wscript files and their hashes are recorded during
# configuration so that the project can be reconfigured automatically
autoconfig = False
"reconfigure the project automatically"
def find_file(filename, path_list):
    """Locate 'filename' in 'path_list' (a list or a space-separated string).

    @param filename: name of the file to search for
    @param path_list: directories to search
    @return: the first directory containing the file, or '' when not found
    """
    for candidate in Utils.to_list(path_list):
        full = os.path.join(candidate, filename)
        if os.path.exists(full):
            return candidate
    return ''
def find_program_impl(env, filename, path_list=[], var=None, environ=None):
    """find a program in folders path_lst, and sets env[var]
    @param env: environment
    @param filename: name of the program to search for
    @param path_list: list of directories to search for filename
    @param var: environment value to be checked for in env or os.environ
    @return: either the value that is referenced with [var] in env or os.environ
    or the first occurrence filename or '' if filename could not be found
    """
    if not environ:
        environ = os.environ
    try: path_list = path_list.split()
    except AttributeError: pass
    if var:
        if env[var]: return env[var]
        if var in environ: env[var] = environ[var]
    if not path_list: path_list = environ.get('PATH', '').split(os.pathsep)
    # on win32 try the executable extensions as well
    ext = (Options.platform == 'win32') and '.exe,.com,.bat,.cmd' or ''
    for y in [filename+x for x in ext.split(',')]:
        for directory in path_list:
            x = os.path.join(directory, y)
            if os.path.isfile(x):
                if var: env[var] = x
                return x
    return ''
class ConfigurationContext(Utils.Context):
tests = {}
error_handlers = []
def __init__(self, env=None, blddir='', srcdir=''):
    # NOTE(review): the 'env' parameter is not used here - kept for
    # backward compatibility, verify before removing
    self.env = None
    self.envname = ''
    self.environ = dict(os.environ)  # private copy of the OS environment
    self.line_just = 40  # column used to align the 'Checking for ...' messages
    self.blddir = blddir
    self.srcdir = srcdir
    self.all_envs = {}
    # curdir: necessary for recursion
    self.cwd = self.curdir = os.getcwd()
    self.tools = [] # tools loaded in the configuration, and that will be loaded when building
    self.setenv(DEFAULT)
    self.lastprog = ''
    self.hash = 0  # combined hash of the configuration scripts (autoconfig)
    self.files = []
    self.tool_cache = []
    if self.blddir:
        self.post_init()
def post_init(self):
    """Second-stage initialization once blddir is known: compute the
    cache dir and open config.log with a descriptive header."""
    self.cachedir = os.path.join(self.blddir, CACHE_DIR)
    path = os.path.join(self.blddir, WAF_CONFIG_LOG)
    try: os.unlink(path)
    except (OSError, IOError): pass
    try:
        self.log = open(path, 'w')
    except (OSError, IOError):
        self.fatal('could not open %r for writing' % path)
    app = Utils.g_module.APPNAME
    if app:
        ver = getattr(Utils.g_module, 'VERSION', '')
        if ver:
            app = "%s (%s)" % (app, ver)
    now = time.ctime()
    pyver = sys.hexversion
    systype = sys.platform
    args = " ".join(sys.argv)
    wafver = WAFVERSION
    abi = ABI
    # conf_template pulls the local variables set above via vars()
    self.log.write(conf_template % vars())
def __del__(self):
    """cleanup function: close config.log"""
    # may be ran by the gc, not always after initialization
    if hasattr(self, 'log') and self.log:
        self.log.close()
def fatal(self, msg):
    "abort the configuration by raising ConfigurationError"
    raise ConfigurationError(msg)
def check_tool(self, input, tooldir=None, funs=None):
    """load a waf tool

    Loads each tool named in 'input' (string or list), optionally
    downloading it from the remote repository when --download is given,
    then runs either the functions in 'funs' or the tool's detect hook.
    Loaded tools are recorded in self.tools for reuse at build time.
    """
    tools = Utils.to_list(input)
    if tooldir: tooldir = Utils.to_list(tooldir)
    for tool in tools:
        # normalize a few historical tool names
        tool = tool.replace('++', 'xx')
        if tool == 'java': tool = 'javaw'
        if tool.lower() == 'unittest': tool = 'unittestw'
        # avoid loading the same tool more than once with the same functions
        # used by composite projects
        mag = (tool, id(self.env), funs)
        if mag in self.tool_cache:
            continue
        self.tool_cache.append(mag)
        module = None
        try:
            module = Utils.load_tool(tool, tooldir)
        except Exception, e:
            ex = e
            if Options.options.download:
                _3rdparty = os.path.normpath(Options.tooldir[0] + os.sep + '..' + os.sep + '3rdparty')
                # try to download the tool from the repository then
                # the default is set to false
                for x in Utils.to_list(Options.remote_repo):
                    for sub in ['branches/waf-%s/wafadmin/3rdparty' % WAFVERSION, 'trunk/wafadmin/3rdparty']:
                        url = '/'.join((x, sub, tool + '.py'))
                        try:
                            web = urlopen(url)
                            if web.getcode() != 200:
                                continue
                        except Exception, e:
                            # on python3 urlopen throws an exception
                            continue
                        else:
                            # save the downloaded tool into the 3rdparty folder
                            loc = None
                            try:
                                loc = open(_3rdparty + os.sep + tool + '.py', 'wb')
                                loc.write(web.read())
                                web.close()
                            finally:
                                if loc:
                                    loc.close()
                            Logs.warn('downloaded %s from %s' % (tool, url))
                            try:
                                module = Utils.load_tool(tool, tooldir)
                            except:
                                # unusable download: remove it and try the next mirror
                                Logs.warn('module %s from %s is unusable' % (tool, url))
                                try:
                                    os.unlink(_3rdparty + os.sep + tool + '.py')
                                except:
                                    pass
                                continue
                            else:
                                break
                if not module:
                    Logs.error('Could not load the tool %r or download a suitable replacement from the repository (sys.path %r)\n%s' % (tool, sys.path, e))
                    raise ex
            else:
                Logs.error('Could not load the tool %r in %r (try the --download option?):\n%s' % (tool, sys.path, e))
                raise ex
        if funs is not None:
            self.eval_rules(funs)
        else:
            func = getattr(module, 'detect', None)
            if func:
                # 'detect' may be a function or a list of method names
                if type(func) is type(find_file): func(self)
                else: self.eval_rules(func)
        self.tools.append({'tool':tool, 'tooldir':tooldir, 'funs':funs})
def sub_config(self, k):
    "executes the configure function of a wscript module"
    self.recurse(k, name='configure')
def pre_recurse(self, name_or_mod, path, nexdir):
    "variables exposed to a wscript's configure section"
    return {'conf': self, 'ctx': self}
def post_recurse(self, name_or_mod, path, nexdir):
    # in autoconfig mode, fold the script's hash into self.hash and record
    # its path so the build can detect when reconfiguration is needed
    if not autoconfig:
        return
    self.hash = hash((self.hash, getattr(name_or_mod, 'waf_hash_val', name_or_mod)))
    self.files.append(path)
def store(self, file=''):
    """save the config results into the cache file

    'file', when given, is expected to be a writable file object; by
    default build.config.py is created in the cache directory.  Each
    environment is stored in its own <name>.cache.py file.
    """
    if not os.path.isdir(self.cachedir):
        os.makedirs(self.cachedir)
    if not file:
        file = open(os.path.join(self.cachedir, 'build.config.py'), 'w')
    file.write('version = 0x%x\n' % HEXVERSION)
    file.write('tools = %r\n' % self.tools)
    file.close()
    if not self.all_envs:
        self.fatal('nothing to store in the configuration context!')
    for key in self.all_envs:
        tmpenv = self.all_envs[key]
        tmpenv.store(os.path.join(self.cachedir, key + CACHE_SUFFIX))
def set_env_name(self, name, env):
    """Register the environment 'env' under 'name' and return it."""
    self.all_envs[name] = env
    return env
def retrieve(self, name, fromenv=None):
    """retrieve an environment called name

    A missing environment is created on the fly with PREFIX set; when
    'fromenv' is given and the name already exists, a warning is printed.
    """
    try:
        env = self.all_envs[name]
    except KeyError:
        env = Environment.Environment()
        env['PREFIX'] = os.path.abspath(os.path.expanduser(Options.options.prefix))
        self.all_envs[name] = env
    else:
        if fromenv: warn("The environment %s may have been configured already" % name)
    return env
def setenv(self, name):
    "enable the environment called name (created on demand by retrieve)"
    self.env = self.retrieve(name)
    self.envname = name
def add_os_flags(self, var, dest=None):
    """Append the OS environment variable 'var' to env[dest or var];
    silently does nothing when the variable is not set."""
    # do not use 'get' to make certain the variable is not defined
    try: self.env.append_value(dest or var, Utils.to_list(self.environ[var]))
    except KeyError: pass
def check_message_1(self, sr):
    """Print and log the left half of a 'Checking for ...' message,
    widening the alignment column when needed."""
    self.line_just = max(self.line_just, len(sr))
    for x in ('\n', self.line_just * '-', '\n', sr, '\n'):
        self.log.write(x)
    Utils.pprint('NORMAL', "%s :" % sr.ljust(self.line_just), sep='')
def check_message_2(self, sr, color='GREEN'):
    "print and log the result half of a 'Checking for ...' message"
    self.log.write(sr)
    self.log.write('\n')
    Utils.pprint(color, sr)
def check_message(self, th, msg, state, option=''):
    """Print a complete 'Checking for <th> <msg>' line: green 'ok <option>'
    when state is truthy, yellow 'not found' otherwise."""
    sr = 'Checking for %s %s' % (th, msg)
    self.check_message_1(sr)
    p = self.check_message_2
    if state: p('ok ' + str(option))
    else: p('not found', 'YELLOW')
# FIXME remove in waf 1.6
# the parameter 'option' is not used (kept for compatibility)
def check_message_custom(self, th, msg, custom, option='', color='PINK'):
    "print a 'Checking for ...' line with a caller-provided result string"
    sr = 'Checking for %s %s' % (th, msg)
    self.check_message_1(sr)
    self.check_message_2(custom, color)
def msg(self, msg, result, color=None):
    """Prints a configuration message 'Checking for xxx: ok'"""
    self.start_msg('Checking for ' + msg)
    if not isinstance(color, str):
        # default color: green for truthy results, yellow otherwise
        color = result and 'GREEN' or 'YELLOW'
    self.end_msg(result, color)
def start_msg(self, msg):
    """Print and log the left part of a two-part configuration message
    (e.g. 'Checking for library foo :').

    Nested start_msg/end_msg pairs are suppressed: only the outermost
    pair produces output (tracked by the self.in_msg counter).
    """
    try:
        if self.in_msg:
            return
    except AttributeError:
        # first call: the counter does not exist yet.  The original used a
        # bare 'except:' which would also swallow KeyboardInterrupt etc.
        self.in_msg = 0
    self.in_msg += 1
    self.line_just = max(self.line_just, len(msg))
    for x in ('\n', self.line_just * '-', '\n', msg, '\n'):
        self.log.write(x)
    Utils.pprint('NORMAL', "%s :" % msg.ljust(self.line_just), sep='')
def end_msg(self, result, color):
    """Print and log the result part of a configuration message started
    with start_msg; nested calls only decrement the counter."""
    self.in_msg -= 1
    if self.in_msg:
        return
    if not color:
        color = 'GREEN'
    # True/False map to 'ok'/'not found'; anything else is shown verbatim
    if result == True:
        msg = 'ok'
    elif result == False:
        msg = 'not found'
        color = 'YELLOW'
    else:
        msg = str(result)
    self.log.write(msg)
    self.log.write('\n')
    Utils.pprint(color, msg)
def find_program(self, filename, path_list=[], var=None, mandatory=False):
    """wrapper that adds a configuration message

    filename may be a single name or a list of alternatives; the first
    match wins.  When 'var' is given, env[var]/os.environ[var] are
    checked first and the result is stored back into env[var].
    With mandatory=True a miss aborts the configuration.
    """
    ret = None
    if var:
        if self.env[var]:
            ret = self.env[var]
        elif var in os.environ:
            ret = os.environ[var]
    if not isinstance(filename, list): filename = [filename]
    if not ret:
        for x in filename:
            ret = find_program_impl(self.env, x, path_list, var, environ=self.environ)
            if ret: break
    self.check_message_1('Checking for program %s' % ' or '.join(filename))
    self.log.write(' find program=%r paths=%r var=%r\n -> %r\n' % (filename, path_list, var, ret))
    if ret:
        Utils.pprint('GREEN', str(ret))
    else:
        Utils.pprint('YELLOW', 'not found')
        if mandatory:
            self.fatal('The program %r is required' % filename)
    if var:
        self.env[var] = ret
    return ret
def cmd_to_list(self, cmd):
    """Convert a command to the list form expected by the task runners.

    Strings written in pseudo-shell form like 'ccache g++' are split with
    shlex, unless the whole string is the path of an existing file (which
    may legitimately contain spaces).  Lists are returned unchanged.
    """
    if isinstance(cmd, str):
        # The original tested 'cmd.find(' ')' as a boolean, which is -1
        # (truthy) when there is no space and 0 (falsy) for a leading
        # space, so ' gcc' was returned as a raw string.  Stat-then-split
        # gives the intended result for every string.
        try:
            os.stat(cmd)
        except OSError:
            return shlex.split(cmd)
        else:
            # an existing file: keep the path intact
            return [cmd]
    return cmd
def __getattr__(self, name):
    """Fallback lookup: resolves 'require_xxx' to a wrapper around
    'check_xxx' or 'find_xxx' that calls fatal() on a falsy result;
    any other unknown attribute aborts the configuration."""
    r = self.__class__.__dict__.get(name, None)
    if r: return r
    if name and name.startswith('require_'):
        for k in ['check_', 'find_']:
            n = name.replace('require_', k)
            ret = self.__class__.__dict__.get(n, None)
            if ret:
                def run(*k, **kw):
                    # delegate, then enforce that the check succeeded
                    r = ret(self, *k, **kw)
                    if not r:
                        self.fatal('requirement failure')
                    return r
                return run
    self.fatal('No such method %r' % name)
def eval_rules(self, rules):
	# Run a list of configuration test methods given by name.
	# err_handler() decides whether a failing test breaks the sequence
	# (BREAK), skips to the next test (CONTINUE) or, by returning
	# anything else, aborts the configuration (python 2 except syntax).
	self.rules = Utils.to_list(rules)
	for x in self.rules:
		f = getattr(self, x)
		if not f: self.fatal("No such method '%s'." % x)
		try:
			f()
		except Exception, e:
			ret = self.err_handler(x, e)
			if ret == BREAK:
				break
			elif ret == CONTINUE:
				continue
			else:
				self.fatal(e)
def err_handler(self, fun, error):
	"""Error-handling hook for eval_rules(); the default returns None so
	any failing test is fatal.  Subclasses may return BREAK or CONTINUE
	to alter the control flow instead."""
	return None
def conf(f):
	"""Decorator: bind a new configuration method onto ConfigurationContext."""
	name = f.__name__
	setattr(ConfigurationContext, name, f)
	return f
def conftest(f):
	"""Decorator: register a configuration test by name (for eval_rules)
	and also attach it as a regular configuration method."""
	ConfigurationContext.tests[f.__name__] = f
	return conf(f)
ntdb-1.0/buildtools/wafadmin/Constants.py 0000664 0000000 0000000 00000002434 12241515307 0020600 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Yinon dot me gmail 2008
"""
these constants are somewhat public, try not to mess them
maintainer: the version number is updated from the top-level wscript file
"""
# do not touch these three lines, they are updated automatically
HEXVERSION=0x105019
WAFVERSION="1.5.19"
WAFREVISION = "9709M"
ABI = 7
# permissions (decimal forms of octal 0644 / 0755)
O644 = 420
O755 = 493
MAXJOBS = 99999999
# cache directory and pickle file naming
CACHE_DIR = 'c4che'
CACHE_SUFFIX = '.cache.py'
DBFILE = '.wafpickle-%d' % ABI
WSCRIPT_FILE = 'wscript'
WSCRIPT_BUILD_FILE = 'wscript_build'
WAF_CONFIG_LOG = 'config.log'
WAF_CONFIG_H = 'config.h'
# placeholder signature used before a real hash is computed
SIG_NIL = 'iluvcuteoverload'
# well-known Environment/wscript keys
VARIANT = '_VARIANT_'
DEFAULT = 'default'
SRCDIR = 'srcdir'
BLDDIR = 'blddir'
APPNAME = 'APPNAME'
VERSION = 'VERSION'
DEFINES = 'defines'
UNDEFINED = ()
# sentinels returned by error handlers (see Configure.eval_rules)
BREAK = "break"
CONTINUE = "continue"
# task scheduler options
JOBCONTROL = "JOBCONTROL"
MAXPARALLEL = "MAXPARALLEL"
NORMAL = "NORMAL"
# task state
NOT_RUN = 0
MISSING = 1
CRASHED = 2
EXCEPTION = 3
SKIPPED = 8
SUCCESS = 9
# task runnable-status values
ASK_LATER = -1
SKIP_ME = -2
RUN_ME = -3
# logging formats (see Logs.formatter)
LOG_FORMAT = "%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s"
HOUR_FORMAT = "%H:%M:%S"
TEST_OK = True
CFG_FILES = 'cfg_files'
# positive '->' install
# negative '<-' uninstall
INSTALL = 1337
UNINSTALL = -1337
ntdb-1.0/buildtools/wafadmin/Environment.py 0000664 0000000 0000000 00000011665 12241515307 0021136 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
"""Environment representation
There is one gotcha: getitem returns [] if the contents evals to False
This means env['foo'] = {}; print env['foo'] will print [] not {}
"""
import os, copy, re
import Logs, Options, Utils
from Constants import *
# matches "KEY = repr(value)" lines in stored environment files;
# group 1 captures an optional '#' prefix, group 2 the key, group 3 the value
re_imp = re.compile('^(#)*?([^#=]*?)\ =\ (.*?)$', re.M)
class Environment(object):
	"""A safe-to-use dictionary, but do not attach functions to it please (break cPickle)
	An environment instance can be stored into a file and loaded easily
	"""
	__slots__ = ("table", "parent")
	def __init__(self, filename=None):
		# own key/value store; lookups fall back to self.parent when set
		self.table = {}
		#self.parent = None
		if filename:
			self.load(filename)
	def __contains__(self, key):
		if key in self.table: return True
		try: return self.parent.__contains__(key)
		except AttributeError: return False # parent may not exist
	def __str__(self):
		# merged view over the whole parent chain, sorted for stable output
		keys = set()
		cur = self
		while cur:
			keys.update(cur.table.keys())
			cur = getattr(cur, 'parent', None)
		keys = list(keys)
		keys.sort()
		return "\n".join(["%r %r" % (x, self.__getitem__(x)) for x in keys])
	def __getitem__(self, key):
		# walk the parent chain; NOTE: missing keys yield [] rather than
		# raising, so "unset" and "empty list" are indistinguishable here
		try:
			while 1:
				x = self.table.get(key, None)
				if not x is None:
					return x
				self = self.parent
		except AttributeError:
			return []
	def __setitem__(self, key, value):
		self.table[key] = value
	def __delitem__(self, key):
		del self.table[key]
	def pop(self, key, *args):
		# remove from this table only; parents are left untouched
		if len(args):
			return self.table.pop(key, *args)
		return self.table.pop(key)
	def set_variant(self, name):
		self.table[VARIANT] = name
	def variant(self):
		# like __getitem__ for VARIANT, but falling back to DEFAULT
		try:
			while 1:
				x = self.table.get(VARIANT, None)
				if not x is None:
					return x
				self = self.parent
		except AttributeError:
			return DEFAULT
	def copy(self):
		# TODO waf 1.6 rename this method derive, #368
		# shallow "copy": the new env merely delegates reads to self
		newenv = Environment()
		newenv.parent = self
		return newenv
	def detach(self):
		"""TODO try it
		modifying the original env will not change the copy"""
		# flatten the parent chain into a private deep-copied table
		tbl = self.get_merged_dict()
		try:
			delattr(self, 'parent')
		except AttributeError:
			pass
		else:
			keys = tbl.keys()
			for x in keys:
				tbl[x] = copy.deepcopy(tbl[x])
			self.table = tbl
	def get_flat(self, key):
		# value as a single space-joined string
		s = self[key]
		if isinstance(s, str): return s
		return ' '.join(s)
	def _get_list_value_for_modification(self, key):
		"""Gets a value that must be a list for further modification. The
		list may be modified inplace and there is no need to
		"self.table[var] = value" afterwards.
		"""
		try:
			value = self.table[key]
		except KeyError:
			# inherit from the parent chain, copying so the parent's
			# value is never modified in place
			try: value = self.parent[key]
			except AttributeError: value = []
			if isinstance(value, list):
				value = value[:]
			else:
				value = [value]
		else:
			if not isinstance(value, list):
				value = [value]
		self.table[key] = value
		return value
	def append_value(self, var, value):
		current_value = self._get_list_value_for_modification(var)
		if isinstance(value, list):
			current_value.extend(value)
		else:
			current_value.append(value)
	def prepend_value(self, var, value):
		current_value = self._get_list_value_for_modification(var)
		if isinstance(value, list):
			current_value = value + current_value
			# a new list: update the dictionary entry
			self.table[var] = current_value
		else:
			current_value.insert(0, value)
	# prepend unique would be ambiguous
	def append_unique(self, var, value):
		# append only the items not already present
		current_value = self._get_list_value_for_modification(var)
		if isinstance(value, list):
			for value_item in value:
				if value_item not in current_value:
					current_value.append(value_item)
		else:
			if value not in current_value:
				current_value.append(value)
	def get_merged_dict(self):
		"""compute a merged table"""
		# parents first, so child entries override them in the update
		table_list = []
		env = self
		while 1:
			table_list.insert(0, env.table)
			try: env = env.parent
			except AttributeError: break
		merged_table = {}
		for table in table_list:
			merged_table.update(table)
		return merged_table
	def store(self, filename):
		"Write the variables into a file"
		file = open(filename, 'w')
		merged_table = self.get_merged_dict()
		keys = list(merged_table.keys())
		keys.sort()
		for k in keys: file.write('%s = %r\n' % (k, merged_table[k]))
		file.close()
	def load(self, filename):
		"Retrieve the variables from a file"
		# NOTE(review): values are rebuilt with eval() on each stored
		# 'key = repr' line, so cache files must be trusted input
		tbl = self.table
		code = Utils.readf(filename)
		for m in re_imp.finditer(code):
			g = m.group
			tbl[g(2)] = eval(g(3))
		Logs.debug('env: %s', self.table)
	def get_destdir(self):
		"return the destdir, useful for installing"
		if self.__getitem__('NOINSTALL'): return ''
		return Options.options.destdir
	def update(self, d):
		# python 2: dict.iteritems
		for k, v in d.iteritems():
			self[k] = v
	def __getattr__(self, name):
		# attribute access maps to item access, except for the slots.
		# NOTE(review): object has no '__getattr__' attribute, so the
		# slot branch ends up raising AttributeError — which is what
		# callers of env.parent expect; confirm this is intentional.
		if name in self.__slots__:
			return object.__getattr__(self, name)
		else:
			return self[name]
	def __setattr__(self, name, value):
		if name in self.__slots__:
			object.__setattr__(self, name, value)
		else:
			self[name] = value
	def __delattr__(self, name):
		if name in self.__slots__:
			object.__delattr__(self, name)
		else:
			del self[name]
ntdb-1.0/buildtools/wafadmin/Logs.py 0000664 0000000 0000000 00000005474 12241515307 0017537 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
import ansiterm
import os, re, logging, traceback, sys
from Constants import *
# active debug zones (comma-separated names, or '*'); set by Options
zones = ''
# verbosity level, mirrored from the command line (-v/-vv/-vvv)
verbose = 0
# ANSI escape sequences for terminal colors; 'USE' toggles them globally
colors_lst = {
'USE' : True,
'BOLD' :'\x1b[01;1m',
'RED' :'\x1b[01;31m',
'GREEN' :'\x1b[32m',
'YELLOW':'\x1b[33m',
'PINK' :'\x1b[35m',
'BLUE' :'\x1b[01;34m',
'CYAN' :'\x1b[36m',
'NORMAL':'\x1b[0m',
'cursor_on' :'\x1b[?25h',
'cursor_off' :'\x1b[?25l',
}
# detect whether stderr is a terminal capable of colors
got_tty = False
term = os.environ.get('TERM', 'dumb')
if not term in ['dumb', 'emacs']:
	try:
		got_tty = sys.stderr.isatty() or (sys.platform == 'win32' and term in ['xterm', 'msys'])
	except AttributeError:
		pass
import Utils
# disable colors for non-terminals or when NOCOLOR is set
if not got_tty or 'NOCOLOR' in os.environ:
	colors_lst['USE'] = False
# test
#if sys.platform == 'win32':
# colors_lst['USE'] = True
def get_color(cl):
	"""Return the ANSI escape sequence for a color name, or '' when color
	output is disabled or the name is unknown."""
	if colors_lst['USE']:
		return colors_lst.get(cl, '')
	return ''
class foo(object):
	"""Color lookup helper: colors.RED and colors('RED') are equivalent,
	both delegating to get_color()."""
	def __getattr__(self, name):
		return get_color(name)
	__call__ = __getattr__
# module-wide singleton used for terminal colors
colors = foo()
# matches an optional "zone: " prefix on log messages
re_log = re.compile(r'(\w+): (.*)', re.M)
class log_filter(logging.Filter):
	# Filter that colorizes records and implements waf debug 'zones':
	# INFO and above always pass; DEBUG records pass only when their
	# zone is enabled (or global verbosity is above 2).
	def __init__(self, name=None):
		pass
	def filter(self, rec):
		# default colors; c1/c2 are consumed by LOG_FORMAT
		rec.c1 = colors.PINK
		rec.c2 = colors.NORMAL
		rec.zone = rec.module
		if rec.levelno >= logging.INFO:
			# severity picks the color
			if rec.levelno >= logging.ERROR:
				rec.c1 = colors.RED
			elif rec.levelno >= logging.WARNING:
				rec.c1 = colors.YELLOW
			else:
				rec.c1 = colors.GREEN
			return True
		# debug record: an explicit "zone: " prefix overrides the module name
		zone = ''
		m = re_log.match(rec.msg)
		if m:
			zone = rec.zone = m.group(1)
			rec.msg = m.group(2)
		if zones:
			return getattr(rec, 'zone', '') in zones or '*' in zones
		elif not verbose > 2:
			return False
		return True
class formatter(logging.Formatter):
	"""Log formatter: colorized plain text for INFO/WARNING/ERROR records,
	the standard LOG_FORMAT for everything else (debug records)."""
	def __init__(self):
		logging.Formatter.__init__(self, LOG_FORMAT, HOUR_FORMAT)
	def format(self, rec):
		if rec.levelno >= logging.WARNING or rec.levelno == logging.INFO:
			try:
				# python 2: msg may be a byte string needing decoding
				return '%s%s%s' % (rec.c1, rec.msg.decode('utf-8'), rec.c2)
			except Exception:
				# bug fix: was a bare 'except:', which would also swallow
				# KeyboardInterrupt/SystemExit raised during formatting
				return rec.c1 + rec.msg + rec.c2
		return logging.Formatter.format(self, rec)
def debug(*k, **kw):
	"""Forward to logging.debug, but only when verbosity is enabled;
	newlines are flattened so each record stays on a single line."""
	if not verbose:
		return
	args = list(k)
	args[0] = args[0].replace('\n', ' ')
	logging.debug(*args, **kw)
def error(*k, **kw):
	# Log an error record; at verbosity -vv or higher, also log the
	# python stack that led here (taken from the WafError if one was
	# passed, otherwise from the current call stack).
	logging.error(*k, **kw)
	if verbose > 1:
		if isinstance(k[0], Utils.WafError):
			st = k[0].stack
		else:
			st = traceback.extract_stack()
		if st:
			# drop the frame of error() itself
			st = st[:-1]
			buf = []
			for filename, lineno, name, line in st:
				buf.append(' File "%s", line %d, in %s' % (filename, lineno, name))
				if line:
					buf.append(' %s' % line.strip())
			if buf: logging.error("\n".join(buf))
# Bug fix / modernization: logging.warn is a deprecated alias of
# logging.warning (it emits DeprecationWarning on modern Python);
# bind the canonical function instead — behavior is identical.
warn = logging.warning
info = logging.info
def init_log():
	"""(Re)configure the root logger: exactly one stream handler with the
	waf formatter and zone filter, at DEBUG level."""
	root = logging.getLogger()
	root.handlers = []
	root.filters = []
	handler = logging.StreamHandler()
	handler.setFormatter(formatter())
	root.addHandler(handler)
	root.addFilter(log_filter())
	root.setLevel(logging.DEBUG)
# import side effect: configure logging immediately
# (may be initialized more than once)
init_log()
ntdb-1.0/buildtools/wafadmin/Node.py 0000664 0000000 0000000 00000044437 12241515307 0017522 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
"""
Node: filesystem structure, contains lists of nodes
IMPORTANT:
1. Each file/folder is represented by exactly one node.
2. Most would-be class properties are stored in Build: nodes to depend on, signature, flags, ..
unused class members increase the .wafpickle file size sensibly with lots of objects.
3. The build is launched from the top of the build dir (for example, in _build_/).
4. Node should not be instantiated directly.
Each instance of Build.BuildContext has a Node subclass.
(aka: 'Nodu', see BuildContext initializer)
The BuildContext is referenced here as self.__class__.bld
Its Node class is referenced here as self.__class__
The public and advertised apis are the following:
${TGT} -> dir/to/file.ext
${TGT[0].base()} -> dir/to/file
${TGT[0].dir(env)} -> dir/to
${TGT[0].file()} -> file.ext
${TGT[0].file_base()} -> file
${TGT[0].suffix()} -> .ext
${TGT[0].abspath(env)} -> /path/to/dir/to/file.ext
"""
import os, sys, fnmatch, re, stat
import Utils, Constants
# node types, stored in the two low bits of Node.id (see Node.get_type)
UNDEFINED = 0
DIR = 1
FILE = 2
BUILD = 3
type_to_string = {UNDEFINED: "unk", DIR: "dir", FILE: "src", BUILD: "bld"}
# These fnmatch expressions are used by default to prune the directory tree
# while doing the recursive traversal in the find_iter method of the Node class.
prune_pats = '.git .bzr .hg .svn _MTN _darcs CVS SCCS'.split()
# These fnmatch expressions are used by default to exclude files and dirs
# while doing the recursive traversal in the find_iter method of the Node class.
exclude_pats = prune_pats + '*~ #*# .#* %*% ._* .gitignore .cvsignore vssver.scc .DS_Store'.split()
# These Utils.jar_regexp expressions are used by default to exclude files and dirs and also prune the directory tree
# while doing the recursive traversal in the ant_glob method of the Node class.
exclude_regs = '''
**/*~
**/#*#
**/.#*
**/%*%
**/._*
**/CVS
**/CVS/**
**/.cvsignore
**/SCCS
**/SCCS/**
**/vssver.scc
**/.svn
**/.svn/**
**/.git
**/.git/**
**/.gitignore
**/.bzr
**/.bzr/**
**/.hg
**/.hg/**
**/_MTN
**/_MTN/**
**/_darcs
**/_darcs/**
**/.DS_Store'''
class Node(object):
	# Filesystem node: exactly one instance per file/folder.  The node
	# type is encoded in the two low bits of self.id; most per-node data
	# (signatures, flags) lives in the Build object, not here.
	__slots__ = ("name", "parent", "id", "childs")
	def __init__(self, name, parent, node_type = UNDEFINED):
		self.name = name
		self.parent = parent
		# assumption: one build object at a time
		self.__class__.bld.id_nodes += 4
		self.id = self.__class__.bld.id_nodes + node_type
		if node_type == DIR: self.childs = {}
		# We do not want to add another type attribute (memory)
		# use the id to find out: type = id & 3
		# for setting: new type = type + x - type & 3
		if parent and name in parent.childs:
			raise Utils.WafError('node %s exists in the parent files %r already' % (name, parent))
		if parent: parent.childs[name] = self
	def __setstate__(self, data):
		# unpickling: directory nodes carry a 4th element (childs)
		if len(data) == 4:
			(self.parent, self.name, self.id, self.childs) = data
		else:
			(self.parent, self.name, self.id) = data
	def __getstate__(self):
		if getattr(self, 'childs', None) is None:
			return (self.parent, self.name, self.id)
		else:
			return (self.parent, self.name, self.id, self.childs)
	def __str__(self):
		if not self.parent: return ''
		return "%s://%s" % (type_to_string[self.id & 3], self.abspath())
	def __repr__(self):
		return self.__str__()
	def __hash__(self):
		"expensive, make certain it is not used"
		raise Utils.WafError('nodes, you are doing it wrong')
	def __copy__(self):
		"nodes are not supposed to be copied"
		raise Utils.WafError('nodes are not supposed to be cloned')
	def get_type(self):
		return self.id & 3
	def set_type(self, t):
		"dangerous, you are not supposed to use this"
		self.id = self.id + t - self.id & 3
	def dirs(self):
		# child nodes that are directories
		return [x for x in self.childs.values() if x.id & 3 == DIR]
	def files(self):
		# child nodes that are source files
		return [x for x in self.childs.values() if x.id & 3 == FILE]
	def get_dir(self, name, default=None):
		node = self.childs.get(name, None)
		if not node or node.id & 3 != DIR: return default
		return node
	def get_file(self, name, default=None):
		node = self.childs.get(name, None)
		if not node or node.id & 3 != FILE: return default
		return node
	def get_build(self, name, default=None):
		node = self.childs.get(name, None)
		if not node or node.id & 3 != BUILD: return default
		return node
	def find_resource(self, lst):
		"Find an existing input file: either a build node declared previously or a source node"
		if isinstance(lst, str):
			lst = Utils.split_path(lst)
		if len(lst) == 1:
			parent = self
		else:
			parent = self.find_dir(lst[:-1])
			if not parent: return None
		self.__class__.bld.rescan(parent)
		name = lst[-1]
		node = parent.childs.get(name, None)
		if node:
			tp = node.id & 3
			if tp == FILE or tp == BUILD:
				return node
			else:
				return None
		tree = self.__class__.bld
		if not name in tree.cache_dir_contents[parent.id]:
			return None
		# the file exists on disk but has no node yet: create a FILE node
		# and record its content hash
		path = parent.abspath() + os.sep + name
		try:
			st = Utils.h_file(path)
		except IOError:
			return None
		child = self.__class__(name, parent, FILE)
		tree.node_sigs[0][child.id] = st
		return child
	def find_or_declare(self, lst):
		"Used for declaring a build node representing a file being built"
		if isinstance(lst, str):
			lst = Utils.split_path(lst)
		if len(lst) == 1:
			parent = self
		else:
			parent = self.find_dir(lst[:-1])
			if not parent: return None
		self.__class__.bld.rescan(parent)
		name = lst[-1]
		node = parent.childs.get(name, None)
		if node:
			tp = node.id & 3
			if tp != BUILD:
				raise Utils.WafError('find_or_declare found a source file where a build file was expected %r' % '/'.join(lst))
			return node
		node = self.__class__(name, parent, BUILD)
		return node
	def find_dir(self, lst):
		"search a folder in the filesystem"
		if isinstance(lst, str):
			lst = Utils.split_path(lst)
		current = self
		for name in lst:
			self.__class__.bld.rescan(current)
			prev = current
			if not current.parent and name == current.name:
				continue
			elif not name:
				continue
			elif name == '.':
				continue
			elif name == '..':
				current = current.parent or current
			else:
				current = prev.childs.get(name, None)
				if current is None:
					# no node yet: create one if the folder is on disk
					dir_cont = self.__class__.bld.cache_dir_contents
					if prev.id in dir_cont and name in dir_cont[prev.id]:
						if not prev.name:
							if os.sep == '/':
								# cygwin //machine/share
								dirname = os.sep + name
							else:
								# windows c:
								dirname = name
						else:
							# regular path
							dirname = prev.abspath() + os.sep + name
						if not os.path.isdir(dirname):
							return None
						current = self.__class__(name, prev, DIR)
					elif (not prev.name and len(name) == 2 and name[1] == ':') or name.startswith('\\\\'):
						# drive letter or \\ path for windows
						current = self.__class__(name, prev, DIR)
					else:
						return None
				else:
					if current.id & 3 != DIR:
						return None
		return current
	def ensure_dir_node_from_path(self, lst):
		"used very rarely, force the construction of a branch of node instance for representing folders"
		if isinstance(lst, str):
			lst = Utils.split_path(lst)
		current = self
		for name in lst:
			if not name:
				continue
			elif name == '.':
				continue
			elif name == '..':
				current = current.parent or current
			else:
				prev = current
				current = prev.childs.get(name, None)
				if current is None:
					current = self.__class__(name, prev, DIR)
		return current
	def exclusive_build_node(self, path):
		"""
		create a hierarchy in the build dir (no source folders) for ill-behaving compilers
		the node is not hashed, so you must do it manually
		after declaring such a node, find_dir and find_resource should work as expected
		"""
		lst = Utils.split_path(path)
		name = lst[-1]
		if len(lst) > 1:
			parent = None
			try:
				parent = self.find_dir(lst[:-1])
			except OSError:
				pass
			if not parent:
				parent = self.ensure_dir_node_from_path(lst[:-1])
				self.__class__.bld.rescan(parent)
			else:
				try:
					self.__class__.bld.rescan(parent)
				except OSError:
					pass
		else:
			parent = self
		node = parent.childs.get(name, None)
		if not node:
			node = self.__class__(name, parent, BUILD)
		return node
	def path_to_parent(self, parent):
		"path relative to a direct ancestor, as string"
		lst = []
		p = self
		h1 = parent.height()
		h2 = p.height()
		while h2 > h1:
			h2 -= 1
			lst.append(p.name)
			p = p.parent
		if lst:
			lst.reverse()
			ret = os.path.join(*lst)
		else:
			ret = ''
		return ret
	def find_ancestor(self, node):
		"find a common ancestor for two nodes - for the shortest path in hierarchy"
		dist = self.height() - node.height()
		if dist < 0: return node.find_ancestor(self)
		# now the real code
		cand = self
		while dist > 0:
			cand = cand.parent
			dist -= 1
		if cand == node: return cand
		cursor = node
		while cand.parent:
			cand = cand.parent
			cursor = cursor.parent
			if cand == cursor: return cand
	def relpath_gen(self, from_node):
		"string representing a relative path between self to another node"
		if self == from_node: return '.'
		if from_node.parent == self: return '..'
		# up_path is '../../../' and down_path is 'dir/subdir/subdir/file'
		ancestor = self.find_ancestor(from_node)
		lst = []
		cand = self
		while not cand.id == ancestor.id:
			lst.append(cand.name)
			cand = cand.parent
		cand = from_node
		while not cand.id == ancestor.id:
			lst.append('..')
			cand = cand.parent
		lst.reverse()
		return os.sep.join(lst)
	def nice_path(self, env=None):
		"printed in the console, open files easily from the launch directory"
		tree = self.__class__.bld
		ln = tree.launch_node()
		if self.id & 3 == FILE: return self.relpath_gen(ln)
		else: return os.path.join(tree.bldnode.relpath_gen(ln), env.variant(), self.relpath_gen(tree.srcnode))
	def is_child_of(self, node):
		"does this node belong to the subtree node"
		p = self
		diff = self.height() - node.height()
		while diff > 0:
			diff -= 1
			p = p.parent
		return p.id == node.id
	def variant(self, env):
		"variant, or output directory for this node, a source has for variant 0"
		if not env: return 0
		elif self.id & 3 == FILE: return 0
		else: return env.variant()
	def height(self):
		"amount of parents"
		# README a cache can be added here if necessary
		d = self
		val = -1
		while d:
			d = d.parent
			val += 1
		return val
	# helpers for building things
	def abspath(self, env=None):
		"""
		absolute path
		@param env [Environment]:
		* obligatory for build nodes: build/variant/src/dir/bar.o
		* optional for dirs: get either src/dir or build/variant/src/dir
		* excluded for source nodes: src/dir/bar.c
		Instead of computing the absolute path each time again,
		store the already-computed absolute paths in one of (variants+1) dictionaries:
		bld.cache_node_abspath[0] holds absolute paths for source nodes.
		bld.cache_node_abspath[variant] holds the absolute path for the build nodes
		which reside in the variant given by env.
		"""
		## absolute path - hot zone, so do not touch
		# less expensive
		variant = (env and (self.id & 3 != FILE) and env.variant()) or 0
		ret = self.__class__.bld.cache_node_abspath[variant].get(self.id, None)
		if ret: return ret
		if not variant:
			# source directory
			if not self.parent:
				val = os.sep == '/' and os.sep or ''
			elif not self.parent.name: # root
				val = (os.sep == '/' and os.sep or '') + self.name
			else:
				val = self.parent.abspath() + os.sep + self.name
		else:
			# build directory
			val = os.sep.join((self.__class__.bld.bldnode.abspath(), variant, self.path_to_parent(self.__class__.bld.srcnode)))
		self.__class__.bld.cache_node_abspath[variant][self.id] = val
		return val
	def change_ext(self, ext):
		"node of the same path, but with a different extension - hot zone so do not touch"
		name = self.name
		k = name.rfind('.')
		if k >= 0:
			name = name[:k] + ext
		else:
			name = name + ext
		return self.parent.find_or_declare([name])
	def src_dir(self, env):
		"src path without the file name"
		return self.parent.srcpath(env)
	def bld_dir(self, env):
		"build path without the file name"
		return self.parent.bldpath(env)
	def bld_base(self, env):
		"build path without the extension: src/dir/foo(.cpp)"
		s = os.path.splitext(self.name)[0]
		return os.path.join(self.bld_dir(env), s)
	def bldpath(self, env=None):
		"path seen from the build dir default/src/foo.cpp"
		if self.id & 3 == FILE:
			return self.relpath_gen(self.__class__.bld.bldnode)
		p = self.path_to_parent(self.__class__.bld.srcnode)
		# NOTE(review): "is not ''" relies on CPython string interning;
		# "p != ''" would express the intent portably — verify
		if p is not '':
			return env.variant() + os.sep + p
		return env.variant()
	def srcpath(self, env=None):
		"path in the srcdir from the build dir ../src/foo.cpp"
		if self.id & 3 == BUILD:
			return self.bldpath(env)
		return self.relpath_gen(self.__class__.bld.bldnode)
	def read(self, env):
		"get the contents of a file, it is not used anywhere for the moment"
		return Utils.readf(self.abspath(env))
	def dir(self, env):
		"scons-like"
		return self.parent.abspath(env)
	def file(self):
		"scons-like"
		return self.name
	def file_base(self):
		"scons-like"
		return os.path.splitext(self.name)[0]
	def suffix(self):
		"scons-like - hot zone so do not touch"
		k = max(0, self.name.rfind('.'))
		return self.name[k:]
	def find_iter_impl(self, src=True, bld=True, dir=True, accept_name=None, is_prune=None, maxdepth=25):
		"""find nodes in the filesystem hierarchy, try to instanciate the nodes passively; same gotcha as ant_glob"""
		bld_ctx = self.__class__.bld
		bld_ctx.rescan(self)
		for name in bld_ctx.cache_dir_contents[self.id]:
			if accept_name(self, name):
				node = self.find_resource(name)
				if node:
					if src and node.id & 3 == FILE:
						yield node
				else:
					node = self.find_dir(name)
					if node and node.id != bld_ctx.bldnode.id:
						if dir:
							yield node
						if not is_prune(self, name):
							if maxdepth:
								for k in node.find_iter_impl(src, bld, dir, accept_name, is_prune, maxdepth=maxdepth - 1):
									yield k
			else:
				# name rejected, but we may still need to recurse into it
				if not is_prune(self, name):
					node = self.find_resource(name)
					if not node:
						# not a file, it is a dir
						node = self.find_dir(name)
						if node and node.id != bld_ctx.bldnode.id:
							if maxdepth:
								for k in node.find_iter_impl(src, bld, dir, accept_name, is_prune, maxdepth=maxdepth - 1):
									yield k
		if bld:
			for node in self.childs.values():
				if node.id == bld_ctx.bldnode.id:
					continue
				if node.id & 3 == BUILD:
					if accept_name(self, node.name):
						yield node
		# python 2 generator termination idiom
		raise StopIteration
	def find_iter(self, in_pat=['*'], ex_pat=exclude_pats, prune_pat=prune_pats, src=True, bld=True, dir=False, maxdepth=25, flat=False):
		"""find nodes recursively, this returns everything but folders by default; same gotcha as ant_glob"""
		if not (src or bld or dir):
			raise StopIteration
		if self.id & 3 != DIR:
			raise StopIteration
		in_pat = Utils.to_list(in_pat)
		ex_pat = Utils.to_list(ex_pat)
		prune_pat = Utils.to_list(prune_pat)
		def accept_name(node, name):
			# exclusion patterns take precedence over inclusion patterns
			for pat in ex_pat:
				if fnmatch.fnmatchcase(name, pat):
					return False
			for pat in in_pat:
				if fnmatch.fnmatchcase(name, pat):
					return True
			return False
		def is_prune(node, name):
			for pat in prune_pat:
				if fnmatch.fnmatchcase(name, pat):
					return True
			return False
		ret = self.find_iter_impl(src, bld, dir, accept_name, is_prune, maxdepth=maxdepth)
		if flat:
			return " ".join([x.relpath_gen(self) for x in ret])
		return ret
	def ant_glob(self, *k, **kw):
		"""
		known gotcha: will enumerate the files, but only if the folder exists in the source directory
		"""
		src=kw.get('src', 1)
		bld=kw.get('bld', 0)
		dir=kw.get('dir', 0)
		excl = kw.get('excl', exclude_regs)
		incl = k and k[0] or kw.get('incl', '**')
		def to_pat(s):
			# compile ant-style patterns into lists of regexes per path segment
			lst = Utils.to_list(s)
			ret = []
			for x in lst:
				x = x.replace('//', '/')
				if x.endswith('/'):
					x += '**'
				lst2 = x.split('/')
				accu = []
				for k in lst2:
					if k == '**':
						accu.append(k)
					else:
						k = k.replace('.', '[.]').replace('*', '.*').replace('?', '.')
						k = '^%s$' % k
						#print "pattern", k
						accu.append(re.compile(k))
				ret.append(accu)
			return ret
		def filtre(name, nn):
			# advance each pattern by one path segment for this name
			ret = []
			for lst in nn:
				if not lst:
					pass
				elif lst[0] == '**':
					ret.append(lst)
					if len(lst) > 1:
						if lst[1].match(name):
							ret.append(lst[2:])
					else:
						ret.append([])
				elif lst[0].match(name):
					ret.append(lst[1:])
			return ret
		def accept(name, pats):
			# a fully-consumed rejection pattern vetoes all acceptances
			nacc = filtre(name, pats[0])
			nrej = filtre(name, pats[1])
			if [] in nrej:
				nacc = []
			return [nacc, nrej]
		def ant_iter(nodi, maxdepth=25, pats=[]):
			nodi.__class__.bld.rescan(nodi)
			tmp = list(nodi.__class__.bld.cache_dir_contents[nodi.id])
			tmp.sort()
			for name in tmp:
				npats = accept(name, pats)
				if npats and npats[0]:
					accepted = [] in npats[0]
					#print accepted, nodi, name
					node = nodi.find_resource(name)
					if node and accepted:
						if src and node.id & 3 == FILE:
							yield node
					else:
						node = nodi.find_dir(name)
						if node and node.id != nodi.__class__.bld.bldnode.id:
							if accepted and dir:
								yield node
							if maxdepth:
								for k in ant_iter(node, maxdepth=maxdepth - 1, pats=npats):
									yield k
			if bld:
				for node in nodi.childs.values():
					if node.id == nodi.__class__.bld.bldnode.id:
						continue
					if node.id & 3 == BUILD:
						npats = accept(node.name, pats)
						if npats and npats[0] and [] in npats[0]:
							yield node
			raise StopIteration
		ret = [x for x in ant_iter(self, pats=[to_pat(incl), to_pat(excl)])]
		if kw.get('flat', True):
			return " ".join([x.relpath_gen(self) for x in ret])
		return ret
	def update_build_dir(self, env=None):
		# NOTE(review): 'bld' is not defined in this scope; calling this
		# with env=None would raise NameError.  Presumably this should be
		# self.__class__.bld.all_envs — verify before relying on it.
		if not env:
			for env in bld.all_envs:
				self.update_build_dir(env)
			return
		path = self.abspath(env)
		lst = Utils.listdir(path)
		try:
			self.__class__.bld.cache_dir_contents[self.id].update(lst)
		except KeyError:
			self.__class__.bld.cache_dir_contents[self.id] = set(lst)
		self.__class__.bld.cache_scanned_folders[self.id] = True
		for k in lst:
			npath = path + os.sep + k
			st = os.stat(npath)
			if stat.S_ISREG(st[stat.ST_MODE]):
				ick = self.find_or_declare(k)
				if not (ick.id in self.__class__.bld.node_sigs[env.variant()]):
					# give new build files a placeholder signature
					self.__class__.bld.node_sigs[env.variant()][ick.id] = Constants.SIG_NIL
			elif stat.S_ISDIR(st[stat.ST_MODE]):
				child = self.find_dir(k)
				if not child:
					child = self.ensure_dir_node_from_path(k)
				child.update_build_dir(env)
class Nodu(Node):
	# default Node subclass; each BuildContext uses its own Node subclass
	# so class-level state (the 'bld' reference) does not leak between
	# build objects
	pass
ntdb-1.0/buildtools/wafadmin/Options.py 0000664 0000000 0000000 00000017213 12241515307 0020260 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Scott Newton, 2005 (scottn)
# Thomas Nagy, 2006 (ita)
"Custom command-line options"
import os, sys, imp, types, tempfile, optparse
import Logs, Utils
from Constants import *
# the built-in top-level commands
cmds = 'distclean configure build install clean uninstall check dist distcheck'.split()
# TODO remove in waf 1.6 the following two
commands = {}
is_install = False
# parsed optparse values; populated by parse_args_impl
options = {}
arg_line = []
launch_dir = ''
tooldir = ''
lockfile = os.environ.get('WAFLOCK', '.lock-wscript')
try: cache_global = os.path.abspath(os.environ['WAFCACHE'])
except KeyError: cache_global = ''
platform = Utils.unversioned_sys_platform()
conf_file = 'conf-runs-%s-%d.pickle' % (platform, ABI)
remote_repo = ['http://waf.googlecode.com/svn/']
"""remote directory for the plugins"""
# Such a command-line should work: JOBS=4 PREFIX=/opt/ DESTDIR=/tmp/ahoj/ waf configure
default_prefix = os.environ.get('PREFIX')
if not default_prefix:
	if platform == 'win32':
		d = tempfile.gettempdir()
		default_prefix = d[0].upper() + d[1:]
		# win32 preserves the case, but gettempdir does not
	else: default_prefix = '/usr/local/'
# NOTE(review): when JOBS is set, this is a *string*; the '< 1' test below
# relies on python 2 mixed-type comparison rules — verify the intent
default_jobs = os.environ.get('JOBS', -1)
if default_jobs < 1:
	try:
		if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
			default_jobs = os.sysconf('SC_NPROCESSORS_ONLN')
		else:
			default_jobs = int(Utils.cmd_output(['sysctl', '-n', 'hw.ncpu']))
	except:
		if os.name == 'java': # platform.system() == 'Java'
			from java.lang import Runtime
			default_jobs = Runtime.getRuntime().availableProcessors()
		else:
			# environment var defined on win32
			default_jobs = int(os.environ.get('NUMBER_OF_PROCESSORS', 1))
default_destdir = os.environ.get('DESTDIR', '')
def get_usage(self):
	# Build the --help usage text from the docstrings of the commands
	# defined in the top-level wscript; monkey-patched onto
	# optparse.OptionParser below.
	cmds_str = []
	module = Utils.g_module
	if module:
		# create the help messages for commands
		tbl = module.__dict__
		keys = list(tbl.keys())
		keys.sort()
		if 'build' in tbl:
			if not module.build.__doc__:
				module.build.__doc__ = 'builds the project'
		if 'configure' in tbl:
			if not module.configure.__doc__:
				module.configure.__doc__ = 'configures the project'
		# list public functions that look like commands
		ban = ['set_options', 'init', 'shutdown']
		optlst = [x for x in keys if not x in ban
			and type(tbl[x]) is type(parse_args_impl)
			and tbl[x].__doc__
			and not x.startswith('_')]
		just = max([len(x) for x in optlst])
		for x in optlst:
			cmds_str.append(' %s: %s' % (x.ljust(just), tbl[x].__doc__))
		ret = '\n'.join(cmds_str)
	else:
		ret = ' '.join(cmds)
	return '''waf [command] [options]
Main commands (example: ./waf build -j4)
%s
''' % ret
# replace the default optparse usage text with the wscript-aware one
setattr(optparse.OptionParser, 'get_usage', get_usage)
def create_parser(module=None):
	# Build the optparse parser holding the standard waf options;
	# project-specific options are added later through Handler/set_options.
	Logs.debug('options: create_parser is called')
	parser = optparse.OptionParser(conflict_handler="resolve", version = 'waf %s (%s)' % (WAFVERSION, WAFREVISION))
	parser.formatter.width = Utils.get_term_cols()
	p = parser.add_option
	p('-j', '--jobs',
		type = 'int',
		default = default_jobs,
		help = 'amount of parallel jobs (%r)' % default_jobs,
		dest = 'jobs')
	p('-k', '--keep',
		action = 'store_true',
		default = False,
		help = 'keep running happily on independent task groups',
		dest = 'keep')
	p('-v', '--verbose',
		action = 'count',
		default = 0,
		help = 'verbosity level -v -vv or -vvv [default: 0]',
		dest = 'verbose')
	p('--nocache',
		action = 'store_true',
		default = False,
		help = 'ignore the WAFCACHE (if set)',
		dest = 'nocache')
	p('--zones',
		action = 'store',
		default = '',
		help = 'debugging zones (task_gen, deps, tasks, etc)',
		dest = 'zones')
	p('-p', '--progress',
		action = 'count',
		default = 0,
		help = '-p: progress bar; -pp: ide output',
		dest = 'progress_bar')
	p('--targets',
		action = 'store',
		default = '',
		help = 'build given task generators, e.g. "target1,target2"',
		dest = 'compile_targets')
	# options only relevant when configuring
	gr = optparse.OptionGroup(parser, 'configuration options')
	parser.add_option_group(gr)
	gr.add_option('-b', '--blddir',
		action = 'store',
		default = '',
		help = 'out dir for the project (configuration)',
		dest = 'blddir')
	gr.add_option('-s', '--srcdir',
		action = 'store',
		default = '',
		help = 'top dir for the project (configuration)',
		dest = 'srcdir')
	gr.add_option('--prefix',
		help = 'installation prefix (configuration) [default: %r]' % default_prefix,
		default = default_prefix,
		dest = 'prefix')
	gr.add_option('--download',
		action = 'store_true',
		default = False,
		help = 'try to download the tools if missing',
		dest = 'download')
	# options only relevant when installing
	gr = optparse.OptionGroup(parser, 'installation options')
	parser.add_option_group(gr)
	gr.add_option('--destdir',
		help = 'installation root [default: %r]' % default_destdir,
		default = default_destdir,
		dest = 'destdir')
	gr.add_option('-f', '--force',
		action = 'store_true',
		default = False,
		help = 'force file installation',
		dest = 'force')
	return parser
def parse_args_impl(parser, _args=None):
	# Parse the command line, normalize the command list ('build' is the
	# default, 'check' implies 'build', 'init' is always prepended) and
	# propagate verbosity/zone settings to the Logs module.
	global options, commands, arg_line
	(options, args) = parser.parse_args(args=_args)
	arg_line = args
	#arg_line = args[:] # copy
	# By default, 'waf' is equivalent to 'waf build'
	commands = {}
	for var in cmds: commands[var] = 0
	if not args:
		commands['build'] = 1
		args.append('build')
	# Parse the command arguments
	for arg in args:
		commands[arg] = True
	# the check thing depends on the build
	if 'check' in args:
		idx = args.index('check')
		try:
			bidx = args.index('build')
			if bidx > idx:
				raise ValueError('build before check')
		except ValueError, e:
			args.insert(idx, 'build')
	if args[0] != 'init':
		args.insert(0, 'init')
	# TODO -k => -j0
	if options.keep: options.jobs = 1
	if options.jobs < 1: options.jobs = 1
	if 'install' in sys.argv or 'uninstall' in sys.argv:
		# absolute path only if set
		options.destdir = options.destdir and os.path.abspath(os.path.expanduser(options.destdir))
	Logs.verbose = options.verbose
	Logs.init_log()
	if options.zones:
		Logs.zones = options.zones.split(',')
		if not Logs.verbose: Logs.verbose = 1
	elif Logs.verbose > 0:
		Logs.zones = ['runner']
	if Logs.verbose > 2:
		Logs.zones = ['*']
# TODO waf 1.6
# 1. rename the class to OptionsContext
# 2. instead of a class attribute, use a module (static 'parser')
# 3. parse_args_impl was made in times when we did not know about binding new methods to classes
class Handler(Utils.Context):
	"""loads wscript modules in folders for adding options
	This class should be named 'OptionsContext'
	A method named 'recurse' is bound when used by the module Scripting"""
	parser = None
	# make it possible to access the reference, like Build.bld
	def __init__(self, module=None):
		# NOTE(review): the *class* attribute Handler.parser ends up holding the
		# Handler instance itself, while the *instance* attribute self.parser
		# holds the optparse parser - confusing but relied upon for global access
		self.parser = create_parser(module)
		self.cwd = os.getcwd()
		Handler.parser = self
	def add_option(self, *k, **kw):
		# forward to the underlying optparse.OptionParser
		self.parser.add_option(*k, **kw)
	def add_option_group(self, *k, **kw):
		return self.parser.add_option_group(*k, **kw)
	def get_option_group(self, opt_str):
		return self.parser.get_option_group(opt_str)
	def sub_options(self, *k, **kw):
		# recurse into a sub-folder wscript to collect its set_options
		# ('recurse' is bound onto this class by Scripting at runtime)
		if not k: raise Utils.WscriptError('folder expected')
		self.recurse(k[0], name='set_options')
	def tool_options(self, *k, **kw):
		# load one or more waf tools and let them declare their options
		Utils.python_24_guard()
		if not k[0]:
			raise Utils.WafError('invalid tool_options call %r %r' % (k, kw))
		tools = Utils.to_list(k[0])
		# TODO waf 1.6 remove the global variable tooldir
		path = Utils.to_list(kw.get('tdir', kw.get('tooldir', tooldir)))
		for tool in tools:
			# normalize tool names: 'c++' -> 'cxx', 'java' -> 'javaw', ...
			tool = tool.replace('++', 'xx')
			if tool == 'java': tool = 'javaw'
			if tool.lower() == 'unittest': tool = 'unittestw'
			module = Utils.load_tool(tool, path)
			try:
				fun = module.set_options
			except AttributeError:
				# the tool declares no options
				pass
			else:
				fun(kw.get('option_group', self))
	def parse_args(self, args=None):
		parse_args_impl(self.parser, args)
ntdb-1.0/buildtools/wafadmin/Runner.py 0000664 0000000 0000000 00000012664 12241515307 0020103 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"Execute the tasks"
import os, sys, random, time, threading, traceback
try: from Queue import Queue
except ImportError: from queue import Queue
import Build, Utils, Logs, Options
from Logs import debug, error
from Constants import *
# how far ahead of the worker count the producer may run
GAP = 15

run_old = threading.Thread.run
def run(*args, **kwargs):
	"""Replacement for Thread.run: delegate to the original implementation and
	route any uncaught worker exception through sys.excepthook so it is
	reported, instead of being printed haphazardly on stderr."""
	try:
		run_old(*args, **kwargs)
	except (KeyboardInterrupt, SystemExit):
		# interpreter shutdown requests must propagate untouched
		raise
	except:
		einfo = sys.exc_info()
		sys.excepthook(*einfo)
threading.Thread.run = run
def process_task(tsk):
	"""Execute one task and hand it back to its master (a Parallel instance)
	through the master's output queue.

	Sets tsk.hasrun to SUCCESS, CRASHED or EXCEPTION and invokes the master's
	error handler on failure.
	"""
	m = tsk.master
	if m.stop:
		# the build is being aborted: do not run anything, just return the task
		m.out.put(tsk)
		return

	try:
		tsk.generator.bld.printout(tsk.display())
		if tsk.__class__.stat: ret = tsk.__class__.stat(tsk)
		# actual call to task's run() function
		else: ret = tsk.call_run()
	except Exception:
		# fix: the bound exception object was never used; the traceback text
		# from Utils.ex_stack() is recorded instead (consistent with the
		# post_run handler below)
		tsk.err_msg = Utils.ex_stack()
		tsk.hasrun = EXCEPTION

		# TODO cleanup
		m.error_handler(tsk)
		m.out.put(tsk)
		return

	if ret:
		# a non-zero exit status means the command failed
		tsk.err_code = ret
		tsk.hasrun = CRASHED
	else:
		try:
			tsk.post_run()
		except Utils.WafError:
			pass
		except Exception:
			tsk.err_msg = Utils.ex_stack()
			tsk.hasrun = EXCEPTION
		else:
			tsk.hasrun = SUCCESS
	if tsk.hasrun != SUCCESS:
		m.error_handler(tsk)

	m.out.put(tsk)
class TaskConsumer(threading.Thread):
	"""Daemon worker thread: pulls tasks from the shared class-level 'ready'
	queue and executes them forever. Instances start themselves on creation."""
	ready = Queue(0)   # unbounded queue of tasks waiting for a worker
	consumers = []     # the worker pool, populated lazily by Parallel.start
	def __init__(self):
		threading.Thread.__init__(self)
		# daemon thread: must not keep the process alive at exit
		self.setDaemon(1)
		self.start()
	def run(self):
		try:
			self.loop()
		except:
			# swallow everything: a dying consumer must not abort the build
			pass
	def loop(self):
		# blocking get(): the thread sleeps until a task is available
		while 1:
			tsk = TaskConsumer.ready.get()
			process_task(tsk)
class Parallel(object):
	"""
	keep the consumer threads busy, and avoid consuming cpu cycles
	when no more tasks can be added (end of the build, etc)
	"""
	def __init__(self, bld, j=2):
		# number of consumers
		self.numjobs = j

		self.manager = bld.task_manager
		self.manager.current_group = 0

		self.total = self.manager.total()

		# tasks waiting to be processed - IMPORTANT
		self.outstanding = []
		self.maxjobs = MAXJOBS

		# tasks that are awaiting for another task to complete
		self.frozen = []

		# tasks returned by the consumers
		self.out = Queue(0)

		self.count = 0 # tasks not in the producer area

		self.processed = 1 # progress indicator

		self.stop = False # error condition to stop the build
		self.error = False # error flag

	def get_next(self):
		"override this method to schedule the tasks in a particular order"
		if not self.outstanding:
			return None
		return self.outstanding.pop(0)

	def postpone(self, tsk):
		"override this method to schedule the tasks in a particular order"
		# TODO consider using a deque instead
		# randomizing the insertion point spreads out the retries of postponed tasks
		if random.randint(0, 1):
			self.frozen.insert(0, tsk)
		else:
			self.frozen.append(tsk)

	def refill_task_list(self):
		"called to set the next group of tasks"

		# drain finished tasks until we are back under the job limits
		while self.count > self.numjobs + GAP or self.count >= self.maxjobs:
			self.get_out()

		while not self.outstanding:
			if self.count:
				self.get_out()

			if self.frozen:
				# retry the tasks that were waiting on other tasks
				self.outstanding += self.frozen
				self.frozen = []
			elif not self.count:
				# nothing pending and nothing running: fetch the next task group
				(jobs, tmp) = self.manager.get_next_set()
				if jobs != None: self.maxjobs = jobs
				if tmp: self.outstanding += tmp
				break

	def get_out(self):
		"the tasks that are put to execute are all collected using get_out"
		ret = self.out.get()
		self.manager.add_finished(ret)
		if not self.stop and getattr(ret, 'more_tasks', None):
			# tasks created at runtime extend the workload
			self.outstanding += ret.more_tasks
			self.total += len(ret.more_tasks)
		self.count -= 1

	def error_handler(self, tsk):
		"by default, errors make the build stop (not thread safe so be careful)"
		if not Options.options.keep:
			self.stop = True
		self.error = True

	def start(self):
		"execute the tasks"

		if TaskConsumer.consumers:
			# the worker pool is usually loaded lazily (see below)
			# in case it is re-used with a different value of numjobs:
			while len(TaskConsumer.consumers) < self.numjobs:
				TaskConsumer.consumers.append(TaskConsumer())

		while not self.stop:

			self.refill_task_list()

			# consider the next task
			tsk = self.get_next()
			if not tsk:
				if self.count:
					# tasks may add new ones after they are run
					continue
				else:
					# no tasks to run, no tasks running, time to exit
					break

			if tsk.hasrun:
				# if the task is marked as "run", just skip it
				self.processed += 1
				self.manager.add_finished(tsk)
				continue

			try:
				st = tsk.runnable_status()
			except Exception, e:
				self.processed += 1
				if self.stop and not Options.options.keep:
					# already stopping: record the task as skipped, not failed
					tsk.hasrun = SKIPPED
					self.manager.add_finished(tsk)
					continue
				self.error_handler(tsk)
				self.manager.add_finished(tsk)
				tsk.hasrun = EXCEPTION
				tsk.err_msg = Utils.ex_stack()
				continue

			if st == ASK_LATER:
				# a dependency is not ready yet
				self.postpone(tsk)
			elif st == SKIP_ME:
				self.processed += 1
				tsk.hasrun = SKIPPED
				self.manager.add_finished(tsk)
			else:
				# run me: put the task in ready queue
				tsk.position = (self.processed, self.total)
				self.count += 1
				tsk.master = self
				self.processed += 1

				if self.numjobs == 1:
					# single-job builds run tasks inline, no threads involved
					process_task(tsk)
				else:
					TaskConsumer.ready.put(tsk)
					# create the consumer threads only if there is something to consume
					if not TaskConsumer.consumers:
						TaskConsumer.consumers = [TaskConsumer() for i in xrange(self.numjobs)]

		# self.count represents the tasks that have been made available to the consumer threads
		# collect all the tasks after an error else the message may be incomplete
		while self.error and self.count:
			self.get_out()

		#print loop
		assert (self.count == 0 or self.stop)
ntdb-1.0/buildtools/wafadmin/Scripting.py 0000664 0000000 0000000 00000035703 12241515307 0020573 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
"Module called for configuring, compiling and installing targets"
import os, sys, shutil, traceback, datetime, inspect, errno
import Utils, Configure, Build, Logs, Options, Environment, Task
from Logs import error, warn, info
from Constants import *
g_gz = 'bz2'   # compression used by dist(): 'gz' or 'bz2' -> tarball, anything else -> zip
commands = []  # remaining commands to execute, consumed by main()
def prepare_impl(t, cwd, ver, wafdir):
	"""Locate the project wscript, load it as the main module, parse the
	options it declares, then run main(). Called by prepare() below.

	t: tool directory, cwd: launch directory, ver: waf version string,
	wafdir: directory containing the waf libraries.
	"""
	Options.tooldir = [t]
	Options.launch_dir = cwd

	# some command-line options can be processed immediately
	if '--version' in sys.argv:
		opt_obj = Options.Handler()
		opt_obj.curdir = cwd
		opt_obj.parse_args()
		sys.exit(0)

	# now find the wscript file
	msg1 = 'Waf: Please run waf from a directory containing a file named "%s" or run distclean' % WSCRIPT_FILE

	# in theory projects can be configured in an autotool-like manner:
	# mkdir build && cd build && ../waf configure && ../waf
	build_dir_override = None
	candidate = None

	lst = os.listdir(cwd)
	search_for_candidate = True
	if WSCRIPT_FILE in lst:
		candidate = cwd
	elif 'configure' in sys.argv and not WSCRIPT_BUILD_FILE in lst:
		# autotool-like configuration: the wscript lives next to the waf binary
		calldir = os.path.abspath(os.path.dirname(sys.argv[0]))
		if WSCRIPT_FILE in os.listdir(calldir):
			candidate = calldir
			search_for_candidate = False
		else:
			error('arg[0] directory does not contain a wscript file')
			sys.exit(1)
		build_dir_override = cwd

	# climb up to find a script if it is not found
	while search_for_candidate:
		if len(cwd) <= 3:
			break # stop at / or c:
		dirlst = os.listdir(cwd)
		if WSCRIPT_FILE in dirlst:
			candidate = cwd
		if 'configure' in sys.argv and candidate:
			break
		if Options.lockfile in dirlst:
			# a previous configuration recorded the project root in the lock file
			env = Environment.Environment()
			try:
				env.load(os.path.join(cwd, Options.lockfile))
			except:
				error('could not load %r' % Options.lockfile)
			try:
				os.stat(env['cwd'])
			except:
				candidate = cwd
			else:
				candidate = env['cwd']
			break
		cwd = os.path.dirname(cwd) # climb up

	if not candidate:
		# check if the user only wanted to display the help
		if '-h' in sys.argv or '--help' in sys.argv:
			warn('No wscript file found: the help message may be incomplete')
			opt_obj = Options.Handler()
			opt_obj.curdir = cwd
			opt_obj.parse_args()
		else:
			error(msg1)
		sys.exit(0)

	# We have found wscript, but there is no guarantee that it is valid
	try:
		os.chdir(candidate)
	except OSError:
		raise Utils.WafError("the folder %r is unreadable" % candidate)

	# define the main module containing the functions init, shutdown, ..
	Utils.set_main_module(os.path.join(candidate, WSCRIPT_FILE))

	if build_dir_override:
		d = getattr(Utils.g_module, BLDDIR, None)
		if d:
			# test if user has set the blddir in wscript.
			msg = ' Overriding build directory %s with %s' % (d, build_dir_override)
			warn(msg)
		Utils.g_module.blddir = build_dir_override

	# bind a few methods and classes by default
	def set_def(obj, name=''):
		# install 'obj' into the wscript module unless the user defined it
		n = name or obj.__name__
		if not n in Utils.g_module.__dict__:
			setattr(Utils.g_module, n, obj)

	for k in [dist, distclean, distcheck, clean, install, uninstall]:
		set_def(k)

	set_def(Configure.ConfigurationContext, 'configure_context')

	for k in ['build', 'clean', 'install', 'uninstall']:
		set_def(Build.BuildContext, k + '_context')

	# now parse the options from the user wscript file
	opt_obj = Options.Handler(Utils.g_module)
	opt_obj.curdir = candidate
	try:
		# 'f' is only probed for existence, never called directly
		f = Utils.g_module.set_options
	except AttributeError:
		pass
	else:
		opt_obj.sub_options([''])
	opt_obj.parse_args()

	if not 'init' in Utils.g_module.__dict__:
		Utils.g_module.init = Utils.nada
	if not 'shutdown' in Utils.g_module.__dict__:
		Utils.g_module.shutdown = Utils.nada

	main()
def prepare(t, cwd, ver, wafdir):
	# entry point called by the waf launcher: check the version, then run
	# prepare_impl, translating errors into process exit codes
	if WAFVERSION != ver:
		msg = 'Version mismatch: waf %s <> wafadmin %s (wafdir %s)' % (ver, WAFVERSION, wafdir)
		print('\033[91mError: %s\033[0m' % msg)
		sys.exit(1)

	# the block below and the triple-quoted string after it form a manual
	# toggle: moving the leading '#' switches to the cProfile variant
	#"""
	try:
		prepare_impl(t, cwd, ver, wafdir)
	except Utils.WafError, e:
		error(str(e))
		sys.exit(1)
	except KeyboardInterrupt:
		Utils.pprint('RED', 'Interrupted')
		sys.exit(68)
	"""
	import cProfile, pstats
	cProfile.runctx("import Scripting; Scripting.prepare_impl(t, cwd, ver, wafdir)", {},
		{'t': t, 'cwd':cwd, 'ver':ver, 'wafdir':wafdir},
		'profi.txt')
	p = pstats.Stats('profi.txt')
	p.sort_stats('time').print_stats(45)
	#"""
def main():
	"""Consume the queued command names one by one, invoking the matching
	function (built-in 'configure'/'build' or one from the user wscript)."""
	global commands
	commands = Options.arg_line[:]

	while commands:
		name = commands.pop(0)
		started = datetime.datetime.now()

		# resolve the callable for this command
		builtin = {'configure': configure, 'build': build}
		fun = builtin.get(name) or getattr(Utils.g_module, name, None)
		if not fun:
			raise Utils.WscriptError('No such command %r' % name)

		ctx = getattr(Utils.g_module, name + '_context', Utils.Context)()

		if name in ('init', 'shutdown', 'dist', 'distclean', 'distcheck'):
			# compatibility TODO remove in waf 1.6: these used to take no argument
			try:
				fun(ctx)
			except TypeError:
				fun()
		else:
			fun(ctx)

		elapsed = ''
		if not Options.options.progress_bar:
			elapsed = ' (%s)' % Utils.get_elapsed_time(started)
		if name != 'init' and name != 'shutdown':
			info('%r finished successfully%s' % (name, elapsed))

		# a 'shutdown' is always appended as the final command
		if not commands and name != 'shutdown':
			commands.append('shutdown')
def configure(conf):
	# 'waf configure': resolve srcdir/blddir, run the wscript configure
	# section and write the lock file used by subsequent builds
	src = getattr(Options.options, SRCDIR, None)
	if not src: src = getattr(Utils.g_module, SRCDIR, None)
	if not src: src = getattr(Utils.g_module, 'top', None)
	if not src:
		src = '.'
		incomplete_src = 1
	src = os.path.abspath(src)

	bld = getattr(Options.options, BLDDIR, None)
	if not bld: bld = getattr(Utils.g_module, BLDDIR, None)
	if not bld: bld = getattr(Utils.g_module, 'out', None)
	if not bld:
		bld = 'build'
		incomplete_bld = 1
	if bld == '.':
		raise Utils.WafError('Setting blddir="." may cause distclean problems')
	bld = os.path.abspath(bld)

	try: os.makedirs(bld)
	except OSError: pass

	# It is not possible to compile specific targets in the configuration
	# this may cause configuration errors if autoconfig is set
	targets = Options.options.compile_targets
	Options.options.compile_targets = None
	Options.is_install = False

	conf.srcdir = src
	conf.blddir = bld
	conf.post_init()

	# 'incomplete_src'/'incomplete_bld' only exist as locals when the defaults
	# were used above; vars() is this function's local namespace, so these
	# tests detect the fallback case - do not "simplify" this
	if 'incomplete_src' in vars():
		conf.check_message_1('Setting srcdir to')
		conf.check_message_2(src)
	if 'incomplete_bld' in vars():
		conf.check_message_1('Setting blddir to')
		conf.check_message_2(bld)

	# calling to main wscript's configure()
	conf.sub_config([''])

	conf.store()

	# this will write a configure lock so that subsequent builds will
	# consider the current path as the root directory (see prepare_impl).
	# to remove: use 'waf distclean'
	env = Environment.Environment()
	env[BLDDIR] = bld
	env[SRCDIR] = src
	env['argv'] = sys.argv
	env['commands'] = Options.commands
	env['options'] = Options.options.__dict__

	# conf.hash & conf.files hold wscript files paths and hash
	# (used only by Configure.autoconfig)
	env['hash'] = conf.hash
	env['files'] = conf.files
	env['environ'] = dict(conf.environ)
	env['cwd'] = os.path.split(Utils.g_module.root_path)[0]

	if Utils.g_module.root_path != src:
		# in case the source dir is somewhere else
		env.store(os.path.join(src, Options.lockfile))

	env.store(Options.lockfile)

	# restore the target list for the build phase
	Options.options.compile_targets = targets
def clean(bld):
	'''removes the build files'''
	# the lock file proves the project was configured and records src/bld dirs
	try:
		lock_env = Environment.Environment(Options.lockfile)
	except IOError:
		raise Utils.WafError('Nothing to clean (project not configured)')

	bld.load_dirs(lock_env[SRCDIR], lock_env[BLDDIR])
	bld.load_envs()
	bld.is_install = 0 # False

	# read the scripts, anchored at the directory holding the top-level wscript
	# (useful for srcdir='/foo/bar')
	root = os.path.split(Utils.g_module.root_path)[0]
	bld.add_subdirs([root])
	try:
		bld.clean()
	finally:
		# persist the updated signatures even if clean() raised
		bld.save()
def check_configured(bld):
	# when Configure.autoconfig is enabled, transparently re-run 'configure'
	# whenever the configuration inputs changed; returns a usable build context
	if not Configure.autoconfig:
		return bld

	conf_cls = getattr(Utils.g_module, 'configure_context', Utils.Context)
	bld_cls = getattr(Utils.g_module, 'build_context', Utils.Context)

	def reconf(proj):
		# replay 'configure' with the options recorded in the lock file,
		# then restore the current option/logging state
		back = (Options.commands, Options.options.__dict__, Logs.zones, Logs.verbose)

		Options.commands = proj['commands']
		Options.options.__dict__ = proj['options']
		conf = conf_cls()
		conf.environ = proj['environ']
		configure(conf)

		(Options.commands, Options.options.__dict__, Logs.zones, Logs.verbose) = back

	try:
		proj = Environment.Environment(Options.lockfile)
	except IOError:
		# never configured: configure from scratch
		conf = conf_cls()
		configure(conf)
	else:
		try:
			bld = bld_cls()
			bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
			bld.load_envs()
		except Utils.WafError:
			# the recorded directories are unusable: reconfigure
			reconf(proj)
			return bld_cls()

	try:
		proj = Environment.Environment(Options.lockfile)
	except IOError:
		raise Utils.WafError('Auto-config: project does not configure (bug)')

	# hash the configuration inputs and compare with the stored hash
	h = 0
	try:
		for file in proj['files']:
			if file.endswith('configure'):
				h = hash((h, Utils.readf(file)))
			else:
				mod = Utils.load_module(file)
				h = hash((h, mod.waf_hash_val))
	except (OSError, IOError):
		warn('Reconfiguring the project: a file is unavailable')
		reconf(proj)
	else:
		if (h != proj['hash']):
			warn('Reconfiguring the project: the configuration has changed')
			reconf(proj)

	return bld_cls()
def install(bld):
	'''installs the build files'''
	# reconfigure first if autoconfig is enabled
	bld = check_configured(bld)

	# flag this run as an installation
	Options.commands['install'] = True
	Options.commands['uninstall'] = False
	Options.is_install = True

	bld.is_install = INSTALL

	# compile whatever is out of date, then copy the files into place
	build_impl(bld)
	bld.install()
def uninstall(bld):
	'''removes the installed files'''
	Options.commands['install'] = False
	Options.commands['uninstall'] = True
	Options.is_install = True

	bld.is_install = UNINSTALL

	try:
		# uninstall must not rebuild anything: temporarily swap out
		# Task.runnable_status so every task reports SKIP_ME
		def runnable_status(self):
			return SKIP_ME
		setattr(Task.Task, 'runnable_status_back', Task.Task.runnable_status)
		setattr(Task.Task, 'runnable_status', runnable_status)

		build_impl(bld)
		bld.install()
	finally:
		# always restore the original method
		setattr(Task.Task, 'runnable_status', Task.Task.runnable_status_back)
def build(bld):
	# 'waf build': compile the project (auto-reconfigure first if enabled)
	bld = check_configured(bld)

	Options.commands['install'] = False
	Options.commands['uninstall'] = False
	Options.is_install = False

	bld.is_install = 0 # False

	return build_impl(bld)
def build_impl(bld):
	# compile the project and/or install the files
	try:
		proj = Environment.Environment(Options.lockfile)
	except IOError:
		raise Utils.WafError("Project not configured (run 'waf configure' first)")

	bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
	bld.load_envs()

	info("Waf: Entering directory `%s'" % bld.bldnode.abspath())
	# read all the wscript files, anchored at the top-level wscript directory
	bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]])

	# execute something immediately before the build starts
	bld.pre_build()

	try:
		bld.compile()
	finally:
		# terminate the progress-bar line before printing anything else
		if Options.options.progress_bar: print('')
		info("Waf: Leaving directory `%s'" % bld.bldnode.abspath())

	# execute something immediately after a successful build
	bld.post_build()

	bld.install()
# names and suffixes never shipped in a release tarball
excludes = '.bzr .bzrignore .git .gitignore .svn CVS .cvsignore .arch-ids {arch} SCCS BitKeeper .hg _MTN _darcs Makefile Makefile.in config.log .gitattributes .hgignore .hgtags'.split()
dist_exts = '~ .rej .orig .pyc .pyo .bak .tar.bz2 tar.gz .zip .swp'.split()
def dont_dist(name, src, build_dir):
	"""Return True when 'name' (found under directory 'src') must be excluded
	from the distribution archive: VCS metadata, waf caches, the lock file,
	the build directory and editor/backup artifacts."""
	global excludes, dist_exts

	if name.startswith(',,') or name.startswith('++') or name.startswith('.waf'):
		return True
	if src == '.' and name == Options.lockfile:
		return True
	if name in excludes:
		return True
	if name == build_dir:
		return True
	for ext in dist_exts:
		if name.endswith(ext):
			return True

	return False
# like shutil.copytree, but filters entries through dont_dist and raises
# exceptions immediately instead of collecting them
def copytree(src, dst, build_dir):
	"""Recursively copy 'src' into a freshly created 'dst', skipping every
	entry rejected by dont_dist (VCS files, the build dir, backups, ...)."""
	entries = os.listdir(src)
	os.makedirs(dst)
	for entry in entries:
		if dont_dist(entry, src, build_dir):
			continue
		src_path = os.path.join(src, entry)
		dst_path = os.path.join(dst, entry)
		if os.path.isdir(src_path):
			copytree(src_path, dst_path, build_dir)
		else:
			shutil.copy2(src_path, dst_path)
# TODO in waf 1.6, change this method if "srcdir == blddir" is allowed
def distclean(ctx=None):
	'''removes the build directory'''
	global commands
	lst = os.listdir('.')
	for f in lst:
		if f == Options.lockfile:
			# the lock file records where the build directory is
			try:
				proj = Environment.Environment(f)
			except:
				Logs.warn('could not read %r' % f)
				continue

			try:
				shutil.rmtree(proj[BLDDIR])
			except IOError:
				pass
			except OSError, e:
				# a missing directory is fine; anything else is reported
				if e.errno != errno.ENOENT:
					Logs.warn('project %r cannot be removed' % proj[BLDDIR])

			try:
				os.remove(f)
			except OSError, e:
				if e.errno != errno.ENOENT:
					Logs.warn('file %r cannot be removed' % f)

		# remove the local waf cache
		if not commands and f.startswith('.waf'):
			shutil.rmtree(f, ignore_errors=True)
# FIXME waf 1.6 a unique ctx parameter, and remove the optional appname and version
def dist(appname='', version=''):
	'''makes a tarball for redistributing the sources'''
	# return return (distdirname, tarballname)
	import tarfile

	# default to the APPNAME/VERSION declared in the wscript
	if not appname: appname = Utils.g_module.APPNAME
	if not version: version = Utils.g_module.VERSION

	tmp_folder = appname + '-' + version
	if g_gz in ['gz', 'bz2']:
		arch_name = tmp_folder + '.tar.' + g_gz
	else:
		arch_name = tmp_folder + '.' + 'zip'

	# remove the previous dir
	try:
		shutil.rmtree(tmp_folder)
	except (OSError, IOError):
		pass

	# remove the previous archive
	try:
		os.remove(arch_name)
	except (OSError, IOError):
		pass

	# copy the files into the temporary folder
	blddir = getattr(Utils.g_module, BLDDIR, None)
	if not blddir:
		blddir = getattr(Utils.g_module, 'out', None)
	copytree('.', tmp_folder, blddir)

	# undocumented hook for additional cleanup
	dist_hook = getattr(Utils.g_module, 'dist_hook', None)
	if dist_hook:
		back = os.getcwd()
		os.chdir(tmp_folder)
		try:
			dist_hook()
		finally:
			# go back to the root directory
			os.chdir(back)

	if g_gz in ['gz', 'bz2']:
		tar = tarfile.open(arch_name, 'w:' + g_gz)
		tar.add(tmp_folder)
		tar.close()
	else:
		Utils.zip_folder(tmp_folder, arch_name, tmp_folder)

	# display the sha1 checksum of the archive when possible
	try: from hashlib import sha1 as sha
	except ImportError: from sha import sha
	try:
		digest = " (sha=%r)" % sha(Utils.readf(arch_name)).hexdigest()
	except:
		digest = ''

	info('New archive created: %s%s' % (arch_name, digest))

	# remove the staging folder, keep only the archive
	if os.path.exists(tmp_folder): shutil.rmtree(tmp_folder)
	return arch_name
# FIXME waf 1.6 a unique ctx parameter, and remove the optional appname and version
def distcheck(appname='', version='', subdir=''):
	'''checks if the sources compile (tarball from 'dist')'''
	import tempfile, tarfile

	if not appname: appname = Utils.g_module.APPNAME
	if not version: version = Utils.g_module.VERSION

	waf = os.path.abspath(sys.argv[0])
	tarball = dist(appname, version)

	path = appname + '-' + version

	# remove any previous instance
	if os.path.exists(path):
		shutil.rmtree(path)

	# unpack the freshly created tarball
	t = tarfile.open(tarball)
	for x in t: t.extract(x)
	t.close()

	# build_path is the directory for the waf invocation
	if subdir:
		build_path = os.path.join(path, subdir)
	else:
		build_path = path

	# run a full configure/build/install/uninstall cycle into a scratch destdir
	instdir = tempfile.mkdtemp('.inst', '%s-%s' % (appname, version))
	ret = Utils.pproc.Popen([waf, 'configure', 'build', 'install', 'uninstall', '--destdir=' + instdir], cwd=build_path).wait()
	if ret:
		raise Utils.WafError('distcheck failed with code %i' % ret)

	# a correct uninstall leaves nothing behind
	if os.path.exists(instdir):
		raise Utils.WafError('distcheck succeeded, but files were left in %s' % instdir)

	shutil.rmtree(path)
# FIXME remove in Waf 1.6 (kept for compatibility)
def add_subdir(dir, bld):
	# deprecated wrapper: recurse into 'dir' and execute its build() section
	bld.recurse(dir, 'build')
ntdb-1.0/buildtools/wafadmin/Task.py 0000664 0000000 0000000 00000103010 12241515307 0017516 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"""
Running tasks in parallel is a simple problem, but in practice it is more complicated:
* dependencies discovered during the build (dynamic task creation)
* dependencies discovered after files are compiled
* the amount of tasks and dependencies (graph size) can be huge
This is why the dependency management is split on three different levels:
1. groups of tasks that run all after another group of tasks
2. groups of tasks that can be run in parallel
3. tasks that can run in parallel, but with possible unknown ad-hoc dependencies
The point #1 represents a strict sequential order between groups of tasks, for example a compiler is produced
and used to compile the rest, whereas #2 and #3 represent partial order constraints where #2 applies to the kind of task
and #3 applies to the task instances.
#1 is held by the task manager: ordered list of TaskGroups (see bld.add_group)
#2 is held by the task groups and the task types: precedence after/before (topological sort),
and the constraints extracted from file extensions
#3 is held by the tasks individually (attribute run_after),
and the scheduler (Runner.py) use Task::runnable_status to reorder the tasks
--
To try, use something like this in your code:
import Constants, Task
Task.algotype = Constants.MAXPARALLEL
--
There are two concepts with the tasks (individual units of change):
* dependency (if 1 is recompiled, recompile 2)
* order (run 2 after 1)
example 1: if t1 depends on t2 and t2 depends on t3 it is not necessary to make t1 depend on t3 (dependency is transitive)
example 2: if t1 depends on a node produced by t2, it is not immediately obvious that t1 must run after t2 (order is not obvious)
The role of the Task Manager is to give the tasks in order (groups of task that may be run in parallel one after the other)
"""
import os, shutil, sys, re, random, datetime, tempfile, shlex
from Utils import md5
import Build, Runner, Utils, Node, Logs, Options
from Logs import debug, warn, error
from Constants import *
algotype = NORMAL
#algotype = JOBCONTROL
#algotype = MAXPARALLEL
COMPILE_TEMPLATE_SHELL = '''
def f(task):
env = task.env
wd = getattr(task, 'cwd', None)
p = env.get_flat
cmd = \'\'\' %s \'\'\' % s
return task.exec_command(cmd, cwd=wd)
'''
COMPILE_TEMPLATE_NOSHELL = '''
def f(task):
env = task.env
wd = getattr(task, 'cwd', None)
def to_list(xx):
if isinstance(xx, str): return [xx]
return xx
lst = []
%s
lst = [x for x in lst if x]
return task.exec_command(lst, cwd=wd)
'''
"""
Enable different kind of dependency algorithms:
1 make groups: first compile all cpps and then compile all links (NORMAL)
2 parallelize all (each link task run after its dependencies) (MAXPARALLEL)
3 like 1 but provide additional constraints for the parallelization (MAXJOBS)
In theory 1. will be faster than 2 for waf, but might be slower for builds
The scheme 2 will not allow for running tasks one by one so it can cause disk thrashing on huge builds
"""
file_deps = Utils.nada
"""
Additional dependency pre-check may be added by replacing the function file_deps.
e.g. extract_outputs, extract_deps below.
"""
class TaskManager(object):
	"""The manager is attached to the build object, it holds a list of TaskGroup"""
	def __init__(self):
		self.groups = []
		self.tasks_done = []
		self.current_group = 0
		self.groups_names = {}

	def group_name(self, g):
		"""name for the group g (utility)"""
		if not isinstance(g, TaskGroup):
			g = self.groups[g]
		for x in self.groups_names:
			if id(self.groups_names[x]) == id(g):
				return x
		return ''

	def group_idx(self, tg):
		"""group the task generator tg is in"""
		se = id(tg)
		for i in range(len(self.groups)):
			g = self.groups[i]
			for t in g.tasks_gen:
				if id(t) == se:
					return i
		return None

	def get_next_set(self):
		"""return the next set of tasks to execute
		the first parameter is the maximum amount of parallelization that may occur"""
		ret = None
		while not ret and self.current_group < len(self.groups):
			ret = self.groups[self.current_group].get_next_set()
			if ret: return ret
			else:
				# current group exhausted: run its deferred hooks and move on
				self.groups[self.current_group].process_install()
				self.current_group += 1
		return (None, None)

	def add_group(self, name=None, set=True):
		#if self.groups and not self.groups[0].tasks:
		#	error('add_group: an empty group is already present')
		g = TaskGroup()

		if name and name in self.groups_names:
			error('add_group: name %s already present' % name)
		# NOTE(review): unnamed groups are all stored under the key None,
		# each overwriting the previous entry - kept as-is
		self.groups_names[name] = g
		self.groups.append(g)
		if set:
			self.current_group = len(self.groups) - 1

	def set_group(self, idx):
		# idx may be a group name (str) or a numeric index
		if isinstance(idx, str):
			g = self.groups_names[idx]
			for x in xrange(len(self.groups)):
				if id(g) == id(self.groups[x]):
					self.current_group = x
		else:
			self.current_group = idx

	def add_task_gen(self, tgen):
		if not self.groups: self.add_group()
		self.groups[self.current_group].tasks_gen.append(tgen)

	def add_task(self, task):
		if not self.groups: self.add_group()
		self.groups[self.current_group].tasks.append(task)

	def total(self):
		# total amount of tasks over all groups (used for the progress display)
		total = 0
		if not self.groups: return 0
		for group in self.groups:
			total += len(group.tasks)
		return total

	def add_finished(self, tsk):
		self.tasks_done.append(tsk)
		bld = tsk.generator.bld
		if bld.is_install:
			f = None
			if 'install' in tsk.__dict__:
				f = tsk.__dict__['install']
				# install=0 to prevent installation
				if f: f(tsk)
			else:
				tsk.install()
class TaskGroup(object):
"the compilation of one group does not begin until the previous group has finished (in the manager)"
def __init__(self):
self.tasks = [] # this list will be consumed
self.tasks_gen = []
self.cstr_groups = Utils.DefaultDict(list) # tasks having equivalent constraints
self.cstr_order = Utils.DefaultDict(set) # partial order between the cstr groups
self.temp_tasks = [] # tasks put on hold
self.ready = 0
self.post_funs = []
def reset(self):
"clears the state of the object (put back the tasks into self.tasks)"
for x in self.cstr_groups:
self.tasks += self.cstr_groups[x]
self.tasks = self.temp_tasks + self.tasks
self.temp_tasks = []
self.cstr_groups = Utils.DefaultDict(list)
self.cstr_order = Utils.DefaultDict(set)
self.ready = 0
def process_install(self):
for (f, k, kw) in self.post_funs:
f(*k, **kw)
def prepare(self):
"prepare the scheduling"
self.ready = 1
file_deps(self.tasks)
self.make_cstr_groups()
self.extract_constraints()
def get_next_set(self):
"next list of tasks to execute using max job settings, returns (maxjobs, task_list)"
global algotype
if algotype == NORMAL:
tasks = self.tasks_in_parallel()
maxj = MAXJOBS
elif algotype == JOBCONTROL:
(maxj, tasks) = self.tasks_by_max_jobs()
elif algotype == MAXPARALLEL:
tasks = self.tasks_with_inner_constraints()
maxj = MAXJOBS
else:
raise Utils.WafError("unknown algorithm type %s" % (algotype))
if not tasks: return ()
return (maxj, tasks)
def make_cstr_groups(self):
"unite the tasks that have similar constraints"
self.cstr_groups = Utils.DefaultDict(list)
for x in self.tasks:
h = x.hash_constraints()
self.cstr_groups[h].append(x)
def set_order(self, a, b):
self.cstr_order[a].add(b)
def compare_exts(self, t1, t2):
"extension production"
x = "ext_in"
y = "ext_out"
in_ = t1.attr(x, ())
out_ = t2.attr(y, ())
for k in in_:
if k in out_:
return -1
in_ = t2.attr(x, ())
out_ = t1.attr(y, ())
for k in in_:
if k in out_:
return 1
return 0
def compare_partial(self, t1, t2):
"partial relations after/before"
m = "after"
n = "before"
name = t2.__class__.__name__
if name in Utils.to_list(t1.attr(m, ())): return -1
elif name in Utils.to_list(t1.attr(n, ())): return 1
name = t1.__class__.__name__
if name in Utils.to_list(t2.attr(m, ())): return 1
elif name in Utils.to_list(t2.attr(n, ())): return -1
return 0
def extract_constraints(self):
"extract the parallelization constraints from the tasks with different constraints"
keys = self.cstr_groups.keys()
max = len(keys)
# hopefully the length of this list is short
for i in xrange(max):
t1 = self.cstr_groups[keys[i]][0]
for j in xrange(i + 1, max):
t2 = self.cstr_groups[keys[j]][0]
# add the constraints based on the comparisons
val = (self.compare_exts(t1, t2)
or self.compare_partial(t1, t2)
)
if val > 0:
self.set_order(keys[i], keys[j])
elif val < 0:
self.set_order(keys[j], keys[i])
def tasks_in_parallel(self):
"(NORMAL) next list of tasks that may be executed in parallel"
if not self.ready: self.prepare()
keys = self.cstr_groups.keys()
unconnected = []
remainder = []
for u in keys:
for k in self.cstr_order.values():
if u in k:
remainder.append(u)
break
else:
unconnected.append(u)
toreturn = []
for y in unconnected:
toreturn.extend(self.cstr_groups[y])
# remove stuff only after
for y in unconnected:
try: self.cstr_order.__delitem__(y)
except KeyError: pass
self.cstr_groups.__delitem__(y)
if not toreturn and remainder:
raise Utils.WafError("circular order constraint detected %r" % remainder)
return toreturn
def tasks_by_max_jobs(self):
"(JOBCONTROL) returns the tasks that can run in parallel with the max amount of jobs"
if not self.ready: self.prepare()
if not self.temp_tasks: self.temp_tasks = self.tasks_in_parallel()
if not self.temp_tasks: return (None, None)
maxjobs = MAXJOBS
ret = []
remaining = []
for t in self.temp_tasks:
m = getattr(t, "maxjobs", getattr(self.__class__, "maxjobs", MAXJOBS))
if m > maxjobs:
remaining.append(t)
elif m < maxjobs:
remaining += ret
ret = [t]
maxjobs = m
else:
ret.append(t)
self.temp_tasks = remaining
return (maxjobs, ret)
def tasks_with_inner_constraints(self):
    """(MAXPARALLEL) returns all tasks in this group, but add the constraints on each task instance
    as an optimization, it might be desirable to discard the tasks which do not have to run"""
    if not self.ready: self.prepare()
    if getattr(self, "done", None): return None
    for p in self.cstr_order:
        for v in self.cstr_order[p]:
            for m in self.cstr_groups[p]:
                for n in self.cstr_groups[v]:
                    # translate group-level ordering into per-task run_after links
                    n.set_run_after(m)
    # the constraints are now encoded on the tasks themselves: reset the tables
    self.cstr_order = Utils.DefaultDict(set)
    self.cstr_groups = Utils.DefaultDict(list)
    self.done = 1
    return self.tasks[:] # make a copy
class store_task_type(type):
    """Metaclass registering task classes.

    Any class whose name ends in '_task' is stored into TaskBase.classes
    under the name with the suffix stripped, so task types can later be
    looked up by their short name.
    """
    def __init__(cls, name, bases, dict):
        super(store_task_type, cls).__init__(name, bases, dict)
        cls_name = cls.__name__
        if cls_name.endswith('_task'):
            short_name = cls_name.replace('_task', '')
            if short_name != 'TaskBase':
                TaskBase.classes[short_name] = cls
class TaskBase(object):
    """Base class for all Waf tasks
    The most important methods are (by usual order of call):
    1 runnable_status: ask the task if it should be run, skipped, or if we have to ask later
    2 __str__: string to display to the user
    3 run: execute the task
    4 post_run: after the task is run, update the cache about the task
    This class should be seen as an interface, it provides the very minimum necessary for the scheduler
    so it does not do much.
    For illustration purposes, TaskBase instances try to execute self.fun (if provided)
    """
    __metaclass__ = store_task_type
    # console color used when displaying the task execution
    color = "GREEN"
    # class-level cap on how many instances of this task type run in parallel
    maxjobs = MAXJOBS
    # registry short-name -> class, filled by the metaclass store_task_type
    classes = {}
    stat = None
    def __init__(self, *k, **kw):
        # execution state; compared against CRASHED/MISSING in format_error
        self.hasrun = NOT_RUN
        try:
            self.generator = kw['generator']
        except KeyError:
            # standalone task: act as its own generator
            self.generator = self
        self.bld = Build.bld
        if kw.get('normal', 1):
            # register with the scheduler unless explicitly asked not to
            self.generator.bld.task_manager.add_task(self)
    def __repr__(self):
        "used for debugging"
        return '\n\t{task: %s %s}' % (self.__class__.__name__, str(getattr(self, "fun", "")))
    def __str__(self):
        "string to display to the user"
        if hasattr(self, 'fun'):
            return 'executing: %s\n' % self.fun.__name__
        return self.__class__.__name__ + '\n'
    def exec_command(self, *k, **kw):
        "use this for executing commands from tasks"
        # TODO in waf 1.6, eliminate bld.exec_command, and move the cwd processing to here
        if self.env['env']:
            kw['env'] = self.env['env']
        return self.generator.bld.exec_command(*k, **kw)
    def runnable_status(self):
        "RUN_ME SKIP_ME or ASK_LATER"
        return RUN_ME
    def can_retrieve_cache(self):
        # the base class never uses an output cache; overridden in Task
        return False
    def call_run(self):
        # skip actual execution when the outputs could be restored from the cache
        if self.can_retrieve_cache():
            return 0
        return self.run()
    def run(self):
        "called if the task must run"
        if hasattr(self, 'fun'):
            return self.fun(self)
        return 0
    def post_run(self):
        "update the dependency tree (node stats)"
        pass
    def display(self):
        "print either the description (using __str__) or the progress bar or the ide output"
        col1 = Logs.colors(self.color)
        col2 = Logs.colors.NORMAL
        if Options.options.progress_bar == 1:
            return self.generator.bld.progress_line(self.position[0], self.position[1], col1, col2)
        if Options.options.progress_bar == 2:
            # machine-readable progress line (for IDE integration)
            ela = Utils.get_elapsed_time(self.generator.bld.ini)
            try:
                ins = ','.join([n.name for n in self.inputs])
            except AttributeError:
                ins = ''
            try:
                outs = ','.join([n.name for n in self.outputs])
            except AttributeError:
                outs = ''
            return '|Total %s|Current %s|Inputs %s|Outputs %s|Time %s|\n' % (self.position[1], self.position[0], ins, outs, ela)
        total = self.position[1]
        n = len(str(total))
        # "[  3/120] description" with right-aligned counters
        fs = '[%%%dd/%%%dd] %%s%%s%%s' % (n, n)
        return fs % (self.position[0], self.position[1], col1, str(self), col2)
    def attr(self, att, default=None):
        "retrieve an attribute from the instance or from the class (microoptimization here)"
        ret = getattr(self, att, self)
        if ret is self: return getattr(self.__class__, att, default)
        return ret
    def hash_constraints(self):
        "identify a task type for all the constraints relevant for the scheduler: precedence, file production"
        a = self.attr
        sum = hash((self.__class__.__name__,
            str(a('before', '')),
            str(a('after', '')),
            str(a('ext_in', '')),
            str(a('ext_out', '')),
            self.__class__.maxjobs))
        return sum
    def format_error(self):
        "error message to display to the user (when a build fails)"
        if getattr(self, "err_msg", None):
            return self.err_msg
        elif self.hasrun == CRASHED:
            try:
                return " -> task failed (err #%d): %r" % (self.err_code, self)
            except AttributeError:
                return " -> task failed: %r" % self
        elif self.hasrun == MISSING:
            return " -> missing files: %r" % self
        else:
            return ''
    def install(self):
        """
        installation is performed by looking at the task attributes:
        * install_path: installation path like "${PREFIX}/bin"
        * filename: install the first node in the outputs as a file with a particular name, be certain to give os.sep
        * chmod: permissions
        """
        bld = self.generator.bld
        # NOTE(review): 'd' is not used below - looks like a leftover; confirm before removing
        d = self.attr('install')
        if self.attr('install_path'):
            lst = [a.relpath_gen(bld.srcnode) for a in self.outputs]
            perm = self.attr('chmod', O644)
            if self.attr('src'):
                # if src is given, install the sources too
                lst += [a.relpath_gen(bld.srcnode) for a in self.inputs]
            if self.attr('filename'):
                dir = self.install_path.rstrip(os.sep) + os.sep + self.attr('filename')
                bld.install_as(dir, lst[0], self.env, perm)
            else:
                bld.install_files(self.install_path, lst, self.env, perm)
class Task(TaskBase):
    """The parent class is quite limited, in this version:
    * file system interaction: input and output nodes
    * persistence: do not re-execute tasks that have already run
    * caching: same files can be saved and retrieved from a cache directory
    * dependencies:
       implicit, like .c files depending on .h files
       explicit, like the input nodes or the dep_nodes
       environment variables, like the CXXFLAGS in self.env
    """
    # environment variable names whose values enter the task signature
    vars = []
    def __init__(self, env, **kw):
        # env: construction environment; kw is forwarded to TaskBase
        TaskBase.__init__(self, **kw)
        self.env = env
        # inputs and outputs are nodes
        # use setters when possible
        self.inputs = []
        self.outputs = []
        self.dep_nodes = []
        self.run_after = []
        # Additionally, you may define the following
        #self.dep_vars = 'PREFIX DATADIR'
    def __str__(self):
        "string to display to the user"
        env = self.env
        src_str = ' '.join([a.nice_path(env) for a in self.inputs])
        tgt_str = ' '.join([a.nice_path(env) for a in self.outputs])
        if self.outputs: sep = ' -> '
        else: sep = ''
        return '%s: %s%s%s\n' % (self.__class__.__name__.replace('_task', ''), src_str, sep, tgt_str)
    def __repr__(self):
        # debugging helper: "{task: name in1,in2 -> out1}"
        return "".join(['\n\t{task: ', self.__class__.__name__, " ", ",".join([x.name for x in self.inputs]), " -> ", ",".join([x.name for x in self.outputs]), '}'])
    def unique_id(self):
        "get a unique id: hash the node paths, the variant, the class, the function"
        try:
            return self.uid
        except AttributeError:
            "this is not a real hot zone, but we want to avoid surprizes here"
            m = md5()
            up = m.update
            up(self.__class__.__name__)
            up(self.env.variant())
            p = None
            for x in self.inputs + self.outputs:
                # hash the parent path only when it changes (consecutive nodes often share it)
                if p != x.parent.id:
                    p = x.parent.id
                    up(x.parent.abspath())
                up(x.name)
            self.uid = m.digest()
            return self.uid
    def set_inputs(self, inp):
        # accept either a single node or a list of nodes
        if isinstance(inp, list): self.inputs += inp
        else: self.inputs.append(inp)
    def set_outputs(self, out):
        # accept either a single node or a list of nodes
        if isinstance(out, list): self.outputs += out
        else: self.outputs.append(out)
    def set_run_after(self, task):
        "set (scheduler) order on another task"
        # TODO: handle list or object
        assert isinstance(task, TaskBase)
        self.run_after.append(task)
    def add_file_dependency(self, filename):
        "TODO user-provided file dependencies"
        node = self.generator.bld.path.find_resource(filename)
        self.dep_nodes.append(node)
    def signature(self):
        # compute the result one time, and suppose the scan_signature will give the good result
        try: return self.cache_sig[0]
        except AttributeError: pass
        self.m = md5()
        # explicit deps
        exp_sig = self.sig_explicit_deps()
        # env vars
        var_sig = self.sig_vars()
        # implicit deps
        imp_sig = SIG_NIL
        if self.scan:
            try:
                imp_sig = self.sig_implicit_deps()
            except ValueError:
                # the dependency cache was stale (sig_implicit_deps raised
                # 'rescan'); restart the whole computation from scratch
                return self.signature()
        # we now have the signature (first element) and the details (for debugging)
        ret = self.m.digest()
        self.cache_sig = (ret, exp_sig, imp_sig, var_sig)
        return ret
    def runnable_status(self):
        "SKIP_ME RUN_ME or ASK_LATER"
        #return 0 # benchmarking
        if self.inputs and (not self.outputs):
            if not getattr(self.__class__, 'quiet', None):
                warn("invalid task (no inputs OR outputs): override in a Task subclass or set the attribute 'quiet' %r" % self)
        # wait for every predecessor task to have run
        for t in self.run_after:
            if not t.hasrun:
                return ASK_LATER
        env = self.env
        bld = self.generator.bld
        # first compute the signature
        new_sig = self.signature()
        # compare the signature to a signature computed previously
        key = self.unique_id()
        try:
            prev_sig = bld.task_sigs[key][0]
        except KeyError:
            debug("task: task %r must run as it was never run before or the task code changed", self)
            return RUN_ME
        # compare the signatures of the outputs
        for node in self.outputs:
            variant = node.variant(env)
            try:
                if bld.node_sigs[variant][node.id] != new_sig:
                    return RUN_ME
            except KeyError:
                debug("task: task %r must run as the output nodes do not exist", self)
                return RUN_ME
        # debug if asked to
        if Logs.verbose: self.debug_why(bld.task_sigs[key])
        if new_sig != prev_sig:
            return RUN_ME
        return SKIP_ME
    def post_run(self):
        "called after a successful task run"
        bld = self.generator.bld
        env = self.env
        sig = self.signature()
        ssig = sig.encode('hex')
        variant = env.variant()
        for node in self.outputs:
            # check if the node exists ..
            try:
                os.stat(node.abspath(env))
            except OSError:
                self.hasrun = MISSING
                self.err_msg = '-> missing file: %r' % node.abspath(env)
                raise Utils.WafError
            # important, store the signature for the next run
            bld.node_sigs[variant][node.id] = sig
        bld.task_sigs[self.unique_id()] = self.cache_sig
        # file caching, if possible
        # try to avoid data corruption as much as possible
        if not Options.cache_global or Options.options.nocache or not self.outputs:
            return None
        if getattr(self, 'cached', None):
            return None
        dname = os.path.join(Options.cache_global, ssig)
        # write into a temporary directory first, publish it atomically below
        tmpdir = tempfile.mkdtemp(prefix=Options.cache_global + os.sep + 'waf')
        try:
            shutil.rmtree(dname)
        except:
            pass
        try:
            i = 0
            for node in self.outputs:
                variant = node.variant(env)
                dest = os.path.join(tmpdir, str(i) + node.name)
                shutil.copy2(node.abspath(env), dest)
                i += 1
        except (OSError, IOError):
            try:
                shutil.rmtree(tmpdir)
            except:
                pass
        else:
            try:
                # atomic publication of the cache entry
                os.rename(tmpdir, dname)
            except OSError:
                # another process published the entry first: discard ours
                try:
                    shutil.rmtree(tmpdir)
                except:
                    pass
            else:
                try:
                    os.chmod(dname, O755)
                except:
                    pass
    def can_retrieve_cache(self):
        """
        Retrieve build nodes from the cache
        update the file timestamps to help cleaning the least used entries from the cache
        additionally, set an attribute 'cached' to avoid re-creating the same cache files
        suppose there are files in cache/dir1/file1 and cache/dir2/file2
        first, read the timestamp of dir1
        then try to copy the files
        then look at the timestamp again, if it has changed, the data may have been corrupt (cache update by another process)
        should an exception occur, ignore the data
        """
        if not Options.cache_global or Options.options.nocache or not self.outputs:
            return None
        env = self.env
        sig = self.signature()
        ssig = sig.encode('hex')
        # first try to access the cache folder for the task
        dname = os.path.join(Options.cache_global, ssig)
        try:
            t1 = os.stat(dname).st_mtime
        except OSError:
            return None
        i = 0
        for node in self.outputs:
            variant = node.variant(env)
            orig = os.path.join(dname, str(i) + node.name)
            try:
                shutil.copy2(orig, node.abspath(env))
                # mark the cache file as used recently (modified)
                os.utime(orig, None)
            except (OSError, IOError):
                debug('task: failed retrieving file')
                return None
            i += 1
        # is it the same folder?
        try:
            t2 = os.stat(dname).st_mtime
        except OSError:
            return None
        if t1 != t2:
            return None
        for node in self.outputs:
            # NOTE(review): 'variant' here is the value left over from the copy
            # loop above; outputs with differing variants would all be recorded
            # under the last one - confirm this is intended
            self.generator.bld.node_sigs[variant][node.id] = sig
            if Options.options.progress_bar < 1:
                self.generator.bld.printout('restoring from cache %r\n' % node.bldpath(env))
        self.cached = True
        return 1
    def debug_why(self, old_sigs):
        "explains why a task is run"
        new_sigs = self.cache_sig
        def v(x):
            # signatures are raw digests; show them as hex
            return x.encode('hex')
        debug("Task %r", self)
        msgs = ['Task must run', '* Source file or manual dependency', '* Implicit dependency', '* Environment variable']
        tmp = 'task: -> %s: %s %s'
        # the tuple layout matches cache_sig: (total, explicit, implicit, vars)
        for x in xrange(len(msgs)):
            if (new_sigs[x] != old_sigs[x]):
                debug(tmp, msgs[x], v(old_sigs[x]), v(new_sigs[x]))
    def sig_explicit_deps(self):
        # hash the explicit dependencies: input nodes, dep_nodes and manual deps
        bld = self.generator.bld
        up = self.m.update
        # the inputs
        for x in self.inputs + getattr(self, 'dep_nodes', []):
            if not x.parent.id in bld.cache_scanned_folders:
                bld.rescan(x.parent)
            variant = x.variant(self.env)
            try:
                up(bld.node_sigs[variant][x.id])
            except KeyError:
                raise Utils.WafError('Missing node signature for %r (required by %r)' % (x, self))
        # manual dependencies, they can slow down the builds
        if bld.deps_man:
            additional_deps = bld.deps_man
            for x in self.inputs + self.outputs:
                try:
                    d = additional_deps[x.id]
                except KeyError:
                    continue
                for v in d:
                    if isinstance(v, Node.Node):
                        bld.rescan(v.parent)
                        variant = v.variant(self.env)
                        try:
                            v = bld.node_sigs[variant][v.id]
                        except KeyError:
                            raise Utils.WafError('Missing node signature for %r (required by %r)' % (v, self))
                    elif hasattr(v, '__call__'):
                        v = v() # dependency is a function, call it
                    up(v)
        for x in self.dep_nodes:
            v = bld.node_sigs[x.variant(self.env)][x.id]
            up(v)
        return self.m.digest()
    def sig_vars(self):
        # hash the environment variables this task class depends on
        bld = self.generator.bld
        env = self.env
        # dependencies on the environment vars
        act_sig = bld.hash_env_vars(env, self.__class__.vars)
        self.m.update(act_sig)
        # additional variable dependencies, if provided
        dep_vars = getattr(self, 'dep_vars', None)
        if dep_vars:
            self.m.update(bld.hash_env_vars(env, dep_vars))
        return self.m.digest()
    #def scan(self, node):
    #	"""this method returns a tuple containing:
    #	* a list of nodes corresponding to real files
    #	* a list of names for files not found in path_lst
    #	the input parameters may have more parameters that the ones used below
    #	"""
    #	return ((), ())
    # when set on a subclass, scan is a method computing implicit dependencies
    scan = None
    # compute the signature, recompute it if there is no match in the cache
    def sig_implicit_deps(self):
        "the signature obtained may not be the one if the files have changed, we do it in two steps"
        bld = self.generator.bld
        # get the task signatures from previous runs
        key = self.unique_id()
        prev_sigs = bld.task_sigs.get(key, ())
        if prev_sigs:
            try:
                # for issue #379
                if prev_sigs[2] == self.compute_sig_implicit_deps():
                    return prev_sigs[2]
            except (KeyError, OSError):
                pass
            # stale data: drop the cached signatures and make signature() restart
            del bld.task_sigs[key]
            raise ValueError('rescan')
        # no previous run or the signature of the dependencies has changed, rescan the dependencies
        (nodes, names) = self.scan()
        if Logs.verbose:
            debug('deps: scanner for %s returned %s %s', str(self), str(nodes), str(names))
        # store the dependencies in the cache
        bld.node_deps[key] = nodes
        bld.raw_deps[key] = names
        # recompute the signature and return it
        try:
            sig = self.compute_sig_implicit_deps()
        except KeyError:
            # build a helpful error message listing the dependency nodes
            # whose signatures are missing
            try:
                nodes = []
                for k in bld.node_deps.get(self.unique_id(), []):
                    if k.id & 3 == 2: # Node.FILE:
                        if not k.id in bld.node_sigs[0]:
                            nodes.append(k)
                    else:
                        if not k.id in bld.node_sigs[self.env.variant()]:
                            nodes.append(k)
            except:
                nodes = '?'
            raise Utils.WafError('Missing node signature for %r (for implicit dependencies %r)' % (nodes, self))
        return sig
    def compute_sig_implicit_deps(self):
        """it is intended for .cpp and inferred .h files
        there is a single list (no tree traversal)
        this is the hot spot so ... do not touch"""
        upd = self.m.update
        bld = self.generator.bld
        tstamp = bld.node_sigs
        env = self.env
        for k in bld.node_deps.get(self.unique_id(), []):
            # unlikely but necessary if it happens
            if not k.parent.id in bld.cache_scanned_folders:
                # if the parent folder is removed, an OSError may be thrown
                bld.rescan(k.parent)
            # if the parent folder is removed, a KeyError will be thrown
            if k.id & 3 == 2: # Node.FILE:
                upd(tstamp[0][k.id])
            else:
                upd(tstamp[env.variant()][k.id])
        return self.m.digest()
def funex(c):
    """Execute the python source in 'c' and return the function named 'f' it defines."""
    namespace = {}
    exec(c, namespace)
    return namespace['f']
reg_act = re.compile(r"(?P\\)|(?P\$\$)|(?P\$\{(?P\w+)(?P.*?)\})", re.M)
def compile_fun_shell(name, line):
    """Compiles a string (once) into a function, eg:
    simple_task_type('c++', '${CXX} -o ${TGT[0]} ${SRC} -I ${SRC[0].parent.bldpath()}')
    The env variables (CXX, ..) on the task must not hold dicts (order)
    The reserved keywords TGT and SRC represent the task input and output nodes
    quick test:
    bld(source='wscript', rule='echo "foo\\${SRC[0].name}\\bar"')
    """
    extr = []
    def repl(match):
        # replace each ${...} by a %s placeholder and record what to interpolate
        g = match.group
        if g('dollar'): return "$"
        elif g('backslash'): return '\\\\'
        elif g('subst'): extr.append((g('var'), g('code'))); return "%s"
        return None
    line = reg_act.sub(repl, line) or line
    parm = []
    dvars = []
    app = parm.append
    for (var, meth) in extr:
        if var == 'SRC':
            # ${SRC...} with accessor -> python expression on task.inputs,
            # bare ${SRC} -> all source paths joined with spaces
            if meth: app('task.inputs%s' % meth)
            else: app('" ".join([a.srcpath(env) for a in task.inputs])')
        elif var == 'TGT':
            if meth: app('task.outputs%s' % meth)
            else: app('" ".join([a.bldpath(env) for a in task.outputs])')
        else:
            # plain env variable: remember it so it enters the task signature
            if not var in dvars: dvars.append(var)
            app("p('%s')" % var)
    if parm: parm = "%% (%s) " % (',\n\t\t'.join(parm))
    else: parm = ''
    # build the function source and compile it once
    c = COMPILE_TEMPLATE_SHELL % (line, parm)
    debug('action: %s', c)
    return (funex(c), dvars)
def compile_fun_noshell(name, line):
    # like compile_fun_shell, but the generated function builds an argv list
    # and the command is executed without a shell
    extr = []
    def repl(match):
        # keep "$" for $$; replace ${...} by a split marker and record the expression
        g = match.group
        if g('dollar'): return "$"
        elif g('subst'): extr.append((g('var'), g('code'))); return "<<|@|>>"
        return None
    line2 = reg_act.sub(repl, line)
    params = line2.split('<<|@|>>')
    buf = []
    dvars = []
    app = buf.append
    for x in xrange(len(extr)):
        # the literal text preceding each substitution becomes argv words
        params[x] = params[x].strip()
        if params[x]:
            app("lst.extend(%r)" % params[x].split())
        (var, meth) = extr[x]
        if var == 'SRC':
            if meth: app('lst.append(task.inputs%s)' % meth)
            else: app("lst.extend([a.srcpath(env) for a in task.inputs])")
        elif var == 'TGT':
            if meth: app('lst.append(task.outputs%s)' % meth)
            else: app("lst.extend([a.bldpath(env) for a in task.outputs])")
        else:
            app('lst.extend(to_list(env[%r]))' % var)
            # remember plain variables so they enter the task signature
            if not var in dvars: dvars.append(var)
    # trailing literal text after the last substitution
    if params[-1]:
        app("lst.extend(%r)" % shlex.split(params[-1]))
    fun = COMPILE_TEMPLATE_NOSHELL % "\n\t".join(buf)
    debug('action: %s', fun)
    return (funex(fun), dvars)
def compile_fun(name, line, shell=None):
    "commands can be launched by the shell or not"
    # redirections and command chaining require a shell
    if line.find('<') > 0 or line.find('>') > 0 or line.find('&&') > 0:
        shell = True
    #else:
    # shell = False
    if shell is None:
        # default: no shell on win32, shell elsewhere
        shell = sys.platform != 'win32'
    if shell:
        return compile_fun_shell(name, line)
    return compile_fun_noshell(name, line)
def simple_task_type(name, line, color='GREEN', vars=[], ext_in=[], ext_out=[], before=[], after=[], shell=None):
    """Create and register a new Task subclass whose run method is compiled from 'line'."""
    fun, dvars = compile_fun(name, line, shell)
    # keep the original rule string around for introspection
    fun.code = line
    # explicit vars win over the variables discovered while compiling the rule
    sig_vars = vars or dvars
    return task_type_from_func(name, fun, sig_vars, color, ext_in, ext_out, before, after)
def task_type_from_func(name, func, vars=[], color='GREEN', ext_in=[], ext_out=[], before=[], after=[]):
    """Create a new Task subclass using 'func' as its run method and register it."""
    attrs = {
        'run': func,
        'vars': vars,
        'color': color,
        'name': name,
        'ext_in': Utils.to_list(ext_in),
        'ext_out': Utils.to_list(ext_out),
        'before': Utils.to_list(before),
        'after': Utils.to_list(after),
    }
    # build the class through Task's metaclass so it is registered consistently
    new_cls = type(Task)(name, (Task,), attrs)
    TaskBase.classes[name] = new_cls
    return new_cls
def always_run(cls):
    """Set all task instances of this class to be executed whenever a build is started
    The task signature is calculated, but the result of the comparation between
    task signatures is bypassed
    """
    previous_status = cls.runnable_status
    def always(self):
        # compute the normal verdict, then force skipped tasks to run
        verdict = previous_status(self)
        if verdict == SKIP_ME:
            verdict = RUN_ME
        return verdict
    cls.runnable_status = always
def update_outputs(cls):
    """When a command is always run, it is possible that the output only change
    sometimes. By default the build node have as a hash the signature of the task
    which may not change. With this, the output nodes (produced) are hashed,
    and the hashes are set to the build nodes
    This may avoid unnecessary recompilations, but it uses more resources
    (hashing the output files) so it is not used by default
    """
    old_post_run = cls.post_run
    def post_run(self):
        old_post_run(self)
        bld = self.generator.bld
        for output in self.outputs:
            # hash the produced file contents instead of reusing the task signature
            bld.node_sigs[self.env.variant()][output.id] = Utils.h_file(output.abspath(self.env))
            # remember which task produced this output node
            bld.task_sigs[output.id] = self.unique_id()
    cls.post_run = post_run
    old_runnable_status = cls.runnable_status
    def runnable_status(self):
        status = old_runnable_status(self)
        if status != RUN_ME:
            return status
        # the task would normally run: see whether the recorded output hashes
        # allow skipping it anyway
        uid = self.unique_id()
        try:
            bld = self.outputs[0].__class__.bld
            new_sig = self.signature()
            prev_sig = bld.task_sigs[uid][0]
            if prev_sig == new_sig:
                for x in self.outputs:
                    if not x.id in bld.node_sigs[self.env.variant()]:
                        return RUN_ME
                    if bld.task_sigs[x.id] != uid: # ensure the outputs are associated with *this* task
                        return RUN_ME
                return SKIP_ME
        except KeyError:
            pass
        except IndexError:
            pass
        return RUN_ME
    cls.runnable_status = runnable_status
def extract_outputs(tasks):
    """file_deps: Infer additional dependencies from task input and output nodes
    """
    # per-variant pair of maps: node id -> tasks reading it / tasks producing it
    by_variant = {}
    for tsk in tasks:
        variant = tsk.env.variant()
        try:
            (readers, writers) = by_variant[variant]
        except KeyError:
            readers = {}
            writers = {}
            by_variant[variant] = (readers, writers)
        for node in getattr(tsk, 'inputs', []):
            readers.setdefault(node.id, []).append(tsk)
        for node in getattr(tsk, 'outputs', []):
            writers.setdefault(node.id, []).append(tsk)
    # any node that is both produced and consumed induces an ordering
    for (readers, writers) in by_variant.values():
        shared = set(readers.iterkeys()).intersection(writers.iterkeys())
        for node_id in shared:
            for consumer in readers[node_id]:
                for producer in writers[node_id]:
                    consumer.set_run_after(producer)
def extract_deps(tasks):
    """file_deps: Infer additional dependencies from task input and output nodes and from implicit dependencies
    returned by the scanners - that will only work if all tasks are created
    this is aimed at people who have pathological builds and who do not care enough
    to implement the build dependencies properly
    with two loops over the list of tasks, do not expect this to be really fast
    """
    # first reuse the function above
    extract_outputs(tasks)
    # map the output nodes to the tasks producing them
    out_to_task = {}
    for x in tasks:
        v = x.env.variant()
        try:
            lst = x.outputs
        except AttributeError:
            pass
        else:
            for node in lst:
                out_to_task[(v, node.id)] = x
    # map the dependencies found to the tasks compiled
    dep_to_task = {}
    for x in tasks:
        try:
            # computing the signature populates bld.node_deps via the scanners
            x.signature()
        except: # this is on purpose
            pass
        v = x.env.variant()
        # NOTE(review): 'key' is never used below - looks like a leftover
        key = x.unique_id()
        for k in x.generator.bld.node_deps.get(x.unique_id(), []):
            try: dep_to_task[(v, k.id)].append(x)
            except KeyError: dep_to_task[(v, k.id)] = [x]
    # now get the intersection
    deps = set(dep_to_task.keys()).intersection(set(out_to_task.keys()))
    # and add the dependencies from task to task
    for idx in deps:
        for k in dep_to_task[idx]:
            k.set_run_after(out_to_task[idx])
    # cleanup, remove the signatures
    for x in tasks:
        try:
            delattr(x, 'cache_sig')
        except AttributeError:
            pass
ntdb-1.0/buildtools/wafadmin/TaskGen.py 0000664 0000000 0000000 00000042424 12241515307 0020163 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"""
The class task_gen encapsulates the creation of task objects (low-level code)
The instances can have various parameters, but the creation of task nodes (Task.py)
is delayed. To achieve this, various methods are called from the method "apply"
The class task_gen contains lots of methods, and a configuration table:
* the methods to call (self.meths) can be specified dynamically (removing, adding, ..)
* the order of the methods (self.prec or by default task_gen.prec) is configurable
* new methods can be inserted dynamically without pasting old code
Additionally, task_gen provides the method apply_core
* file extensions are mapped to methods: def meth(self, name_or_node)
* if a mapping is not found in self.mappings, it is searched in task_gen.mappings
* when called, the functions may modify self.allnodes to re-add source to process
* the mappings can map an extension or a filename (see the code below)
WARNING: subclasses must reimplement the clone method
"""
import os, traceback, copy
import Build, Task, Utils, Logs, Options
from Logs import debug, error, warn
from Constants import *
# commonly mistyped task generator attribute names -> the canonical name;
# task_gen.__setattr__ consults this table and warns on a near-miss
typos = {
'sources':'source',
'targets':'target',
'include':'includes',
'define':'defines',
'importpath':'importpaths',
'install_var':'install_path',
'install_subdir':'install_path',
'inst_var':'install_path',
'inst_dir':'install_path',
'feature':'features',
}
class register_obj(type):
    """no decorators for classes, so we use a metaclass
    we store into task_gen.classes the classes that inherit task_gen
    and whose names end in '_taskgen'
    """
    def __init__(cls, name, bases, dict):
        super(register_obj, cls).__init__(name, bases, dict)
        cls_name = cls.__name__
        suffix = '_taskgen'
        if cls_name.endswith(suffix):
            # register under the short name, suffix removed
            task_gen.classes[cls_name.replace(suffix, '')] = cls
class task_gen(object):
    """
    Most methods are of the form 'def meth(self):' without any parameters
    there are many of them, and they do many different things:
    * task creation
    * task results installation
    * environment modification
    * attribute addition/removal
    The inheritance approach is complicated
    * mixing several languages at once
    * subclassing is needed even for small changes
    * inserting new methods is complicated
    This new class uses a configuration table:
    * adding new methods easily
    * obtaining the order in which to call the methods
    * postponing the method calls (post() -> apply)
    Additionally, a 'traits' static attribute is provided:
    * this list contains methods
    * the methods can remove or add methods from self.meths
    Example1: the attribute 'staticlib' is set on an instance
    a method set in the list of traits is executed when the
    instance is posted, it finds that flag and adds another method for execution
    Example2: a method set in the list of traits finds the msvc
    compiler (from self.env['MSVC']==1); more methods are added to self.meths
    """
    __metaclass__ = register_obj
    # class-level tables, shared by all instances unless shadowed
    mappings = {}
    mapped = {}
    prec = Utils.DefaultDict(list)
    traits = Utils.DefaultDict(set)
    classes = {}
    def __init__(self, *kw, **kwargs):
        # map precedence of function names to call
        self.prec = Utils.DefaultDict(list)
        # so we will have to play with directed acyclic graphs
        # detect cycles, etc
        self.source = ''
        self.target = ''
        # list of methods to execute - does not touch it by hand unless you know
        self.meths = []
        # list of mappings extension -> function
        self.mappings = {}
        # list of features (see the documentation on traits)
        self.features = list(kw)
        # not always a good idea
        self.tasks = []
        self.default_chmod = O644
        self.default_install_path = None
        # kind of private, beware of what you put in it, also, the contents are consumed
        self.allnodes = []
        self.bld = kwargs.get('bld', Build.bld)
        self.env = self.bld.env.copy()
        self.path = self.bld.path # emulate chdir when reading scripts
        self.name = '' # give a name to the target (static+shlib with the same targetname ambiguity)
        # provide a unique id
        self.idx = self.bld.idx[self.path.id] = self.bld.idx.get(self.path.id, 0) + 1
        for key, val in kwargs.iteritems():
            setattr(self, key, val)
        self.bld.task_manager.add_task_gen(self)
        self.bld.all_task_gen.append(self)
    def __str__(self):
        # bug fix: the format string had been lost (text conversion stripped the
        # "<...>" part), leaving "" % (a, b, c) which raises TypeError
        return ("<task_gen '%s' of type %s defined in %s>"
            % (self.name or self.target, self.__class__.__name__, str(self.path)))
    def __setattr__(self, name, attr):
        # correct common attribute-name typos (see the 'typos' table) with a warning
        real = typos.get(name, name)
        if real != name:
            warn('typo %s -> %s' % (name, real))
            if Logs.verbose > 0:
                traceback.print_stack()
        object.__setattr__(self, real, attr)
    def to_list(self, value):
        "helper: returns a list"
        if isinstance(value, str): return value.split()
        else: return value
    def apply(self):
        "order the methods to execute using self.prec or task_gen.prec"
        keys = set(self.meths)
        # add the methods listed in the features
        self.features = Utils.to_list(self.features)
        for x in self.features + ['*']:
            st = task_gen.traits[x]
            if not st:
                warn('feature %r does not exist - bind at least one method to it' % x)
            keys.update(st)
        # copy the precedence table
        prec = {}
        prec_tbl = self.prec or task_gen.prec
        for x in prec_tbl:
            if x in keys:
                prec[x] = prec_tbl[x]
        # elements disconnected
        tmp = []
        for a in keys:
            for x in prec.values():
                if a in x: break
            else:
                tmp.append(a)
        # topological sort
        out = []
        while tmp:
            e = tmp.pop()
            if e in keys: out.append(e)
            try:
                nlst = prec[e]
            except KeyError:
                pass
            else:
                del prec[e]
                for x in nlst:
                    for y in prec:
                        if x in prec[y]:
                            break
                    else:
                        tmp.append(x)
        if prec: raise Utils.WafError("graph has a cycle %s" % str(prec))
        out.reverse()
        self.meths = out
        # then we run the methods in order
        debug('task_gen: posting %s %d', self, id(self))
        for x in out:
            try:
                v = getattr(self, x)
            except AttributeError:
                raise Utils.WafError("tried to retrieve %s which is not a valid method" % x)
            debug('task_gen: -> %s (%d)', x, id(self))
            v()
    def post(self):
        "runs the code to create the tasks, do not subclass"
        if not self.name:
            if isinstance(self.target, list):
                self.name = ' '.join(self.target)
            else:
                self.name = self.target
        if getattr(self, 'posted', None):
            #error("OBJECT ALREADY POSTED" + str( self))
            return
        self.apply()
        self.posted = True
        debug('task_gen: posted %s', self.name)
    def get_hook(self, ext):
        # find the method bound to a file extension, instance table first
        try: return self.mappings[ext]
        except KeyError:
            try: return task_gen.mappings[ext]
            except KeyError: return None
    # TODO waf 1.6: always set the environment
    # TODO waf 1.6: create_task(self, name, inputs, outputs)
    def create_task(self, name, src=None, tgt=None, env=None):
        env = env or self.env
        task = Task.TaskBase.classes[name](env.copy(), generator=self)
        if src:
            task.set_inputs(src)
        if tgt:
            task.set_outputs(tgt)
        self.tasks.append(task)
        return task
    def name_to_obj(self, name):
        return self.bld.name_to_obj(name, self.env)
    def find_sources_in_dirs(self, dirnames, excludes=[], exts=[]):
        """
        The attributes "excludes" and "exts" must be lists to avoid the confusion
        find_sources_in_dirs('a', 'b', 'c') <-> find_sources_in_dirs('a b c')
        do not use absolute paths
        do not use paths outside of the source tree
        the files or folder beginning by . are not returned
        # TODO: remove in Waf 1.6
        """
        err_msg = "'%s' attribute must be a list"
        if not isinstance(excludes, list):
            raise Utils.WscriptError(err_msg % 'excludes')
        if not isinstance(exts, list):
            raise Utils.WscriptError(err_msg % 'exts')
        lst = []
        #make sure dirnames is a list helps with dirnames with spaces
        dirnames = self.to_list(dirnames)
        ext_lst = exts or list(self.mappings.keys()) + list(task_gen.mappings.keys())
        for name in dirnames:
            anode = self.path.find_dir(name)
            if not anode or not anode.is_child_of(self.bld.srcnode):
                raise Utils.WscriptError("Unable to use '%s' - either because it's not a relative path" \
                    ", or it's not child of '%s'." % (name, self.bld.srcnode))
            self.bld.rescan(anode)
            for name in self.bld.cache_dir_contents[anode.id]:
                # ignore hidden files
                if name.startswith('.'):
                    continue
                (base, ext) = os.path.splitext(name)
                if ext in ext_lst and not name in lst and not name in excludes:
                    lst.append((anode.relpath_gen(self.path) or '.') + os.path.sep + name)
        lst.sort()
        self.source = self.to_list(self.source)
        if not self.source: self.source = lst
        else: self.source += lst
    def clone(self, env):
        """when creating a clone in a task generator method,
        make sure to set posted=False on the clone
        else the other task generator will not create its tasks"""
        newobj = task_gen(bld=self.bld)
        for x in self.__dict__:
            if x in ['env', 'bld']:
                continue
            elif x in ["path", "features"]:
                setattr(newobj, x, getattr(self, x))
            else:
                setattr(newobj, x, copy.copy(getattr(self, x)))
        newobj.__class__ = self.__class__
        if isinstance(env, str):
            newobj.env = self.bld.all_envs[env].copy()
        else:
            newobj.env = env.copy()
        return newobj
    def get_inst_path(self):
        return getattr(self, '_install_path', getattr(self, 'default_install_path', ''))
    def set_inst_path(self, val):
        self._install_path = val
    install_path = property(get_inst_path, set_inst_path)
    def get_chmod(self):
        return getattr(self, '_chmod', getattr(self, 'default_chmod', O644))
    def set_chmod(self, val):
        self._chmod = val
    chmod = property(get_chmod, set_chmod)
def declare_extension(var, func):
    """Bind one or several file extensions to the task generator method 'func'."""
    try:
        for extension in Utils.to_list(var):
            task_gen.mappings[extension] = func
    except:
        raise Utils.WscriptError('declare_extension takes either a list or a string %r' % var)
    # also index the hook by function name
    task_gen.mapped[func.__name__] = func
def declare_order(*k):
    """Declare that the method names given must execute in the given order."""
    assert(len(k) > 1)
    # each consecutive pair (earlier, later) becomes a precedence entry
    for earlier, later in zip(k[:-1], k[1:]):
        if not earlier in task_gen.prec[later]:
            task_gen.prec[later].append(earlier)
def declare_chain(name='', action='', ext_in='', ext_out='', reentrant=True, color='BLUE',
	install=0, before=[], after=[], decider=None, rule=None, scan=None):
	"""Create a task class plus an extension hook for a simple
	file transformation (ext_in -> ext_out).

	see Tools/flex.py for an example
	while i do not like such wrappers, some people really do
	"""
	# 'rule' is accepted as an alias for 'action'
	action = action or rule
	if isinstance(action, str):
		# a command-line template string
		act = Task.simple_task_type(name, action, color=color)
	else:
		# a python function
		act = Task.task_type_from_func(name, action, color=color)
	act.ext_in = tuple(Utils.to_list(ext_in))
	act.ext_out = tuple(Utils.to_list(ext_out))
	act.before = Utils.to_list(before)
	act.after = Utils.to_list(after)
	act.scan = scan
	def x_file(self, node):
		# extension hook: creates one task per matching source file
		if decider:
			# the decider may compute the output extension(s) per node
			ext = decider(self, node)
		else:
			ext = ext_out
		if isinstance(ext, str):
			out_source = node.change_ext(ext)
			if reentrant:
				# feed the output back for further processing
				self.allnodes.append(out_source)
		elif isinstance(ext, list):
			out_source = [node.change_ext(x) for x in ext]
			if reentrant:
				# reentrant may be True (all outputs) or an int (first n)
				for i in xrange((reentrant is True) and len(out_source) or reentrant):
					self.allnodes.append(out_source[i])
		else:
			# XXX: useless: it will fail on Utils.to_list above...
			raise Utils.WafError("do not know how to process %s" % str(ext))
		tsk = self.create_task(name, node, out_source)
		if node.__class__.bld.is_install:
			tsk.install = install
	declare_extension(act.ext_in, x_file)
	return x_file
def bind_feature(name, methods):
	"""Associate the given method name(s) with the feature *name*."""
	task_gen.traits[name].update(Utils.to_list(methods))
"""
All the following decorators are registration decorators, i.e add an attribute to current class
(task_gen and its derivatives), with same name as func, which points to func itself.
For example:
@taskgen
def sayHi(self):
print("hi")
Now taskgen.sayHi() may be called
If python were really smart, it could infer itself the order of methods by looking at the
attributes. A prerequisite for execution is to have the attribute set before.
Intelligent compilers binding aspect-oriented programming and parallelization, what a nice topic for studies.
"""
def taskgen(func):
	"""
	register a method as a task generator method
	"""
	# attach func to the task_gen class under its own name
	setattr(task_gen, func.__name__, func)
	return func
def feature(*k):
	"""
	declare a task generator method that will be executed when the
	object attribute 'feature' contains the corresponding key(s)
	"""
	def deco(func):
		# attach the method and record it under each feature name
		setattr(task_gen, func.__name__, func)
		for name in k:
			task_gen.traits[name].update([func.__name__])
		return func
	return deco
def before(*k):
	"""
	declare a task generator method which will be executed
	before the functions of given name(s)
	"""
	def deco(func):
		setattr(task_gen, func.__name__, func)
		# record func as a predecessor of each named method
		for fun_name in k:
			if not func.__name__ in task_gen.prec[fun_name]:
				task_gen.prec[fun_name].append(func.__name__)
		return func
	return deco
def after(*k):
	"""
	declare a task generator method which will be executed
	after the functions of given name(s)
	"""
	def deco(func):
		setattr(task_gen, func.__name__, func)
		# record each named method as a predecessor of func
		for fun_name in k:
			if not fun_name in task_gen.prec[func.__name__]:
				task_gen.prec[func.__name__].append(fun_name)
		return func
	return deco
def extension(var):
	"""
	declare a task generator method which will be invoked during
	the processing of source files for the extension given
	"""
	def deco(func):
		setattr(task_gen, func.__name__, func)
		try:
			for x in Utils.to_list(var):
				task_gen.mappings[x] = func
		except Exception:
			# was a bare 'except:', which would also swallow
			# KeyboardInterrupt/SystemExit; only convert genuine errors
			raise Utils.WafError('extension takes either a list or a string %r' % var)
		task_gen.mapped[func.__name__] = func
		return func
	return deco
# TODO make certain the decorators may be used here
def apply_core(self):
	"""Process the attribute source
	transform the names into file nodes
	try to process the files by name first, later by extension"""
	# get the list of folders to use by the scanners
	# all our objects share the same include paths anyway
	find_resource = self.path.find_resource
	for filename in self.to_list(self.source):
		# if self.mappings or task_gen.mappings contains a file of the same name
		x = self.get_hook(filename)
		if x:
			x(self, filename)
		else:
			node = find_resource(filename)
			if not node: raise Utils.WafError("source not found: '%s' in '%s'" % (filename, str(self.path)))
			self.allnodes.append(node)
	for node in self.allnodes:
		# self.mappings or task_gen.mappings map the file extension to a function
		x = self.get_hook(node.suffix())
		if not x:
			raise Utils.WafError("Cannot guess how to process %s (got mappings %r in %r) -> try conf.check_tool(..)?" % \
				(str(node), self.__class__.mappings.keys(), self.__class__))
		x(self, node)
# run for every task generator (registered through the 'feature' decorator)
feature('*')(apply_core)
def exec_rule(self):
	"""Process the attribute rule, when provided the method apply_core will be disabled
	"""
	if not getattr(self, 'rule', None):
		return
	# someone may have removed it already
	try:
		self.meths.remove('apply_core')
	except ValueError:
		pass
	# get the function and the variables
	func = self.rule
	vars2 = []
	if isinstance(func, str):
		# use the shell by default for user-defined commands
		(func, vars2) = Task.compile_fun('', self.rule, shell=getattr(self, 'shell', True))
		func.code = self.rule
	# create the task class
	name = getattr(self, 'name', None) or self.target or self.rule
	if not isinstance(name, str):
		name = str(self.idx)
	cls = Task.task_type_from_func(name, func, getattr(self, 'vars', vars2))
	cls.color = getattr(self, 'color', 'BLUE')
	# now create one instance
	tsk = self.create_task(name)
	dep_vars = getattr(self, 'dep_vars', ['ruledeps'])
	if dep_vars:
		tsk.dep_vars = dep_vars
	if isinstance(self.rule, str):
		tsk.env.ruledeps = self.rule
	else:
		# only works if the function is in a global module such as a waf tool
		tsk.env.ruledeps = Utils.h_fun(self.rule)
	# we assume that the user knows that without inputs or outputs
	#if not getattr(self, 'target', None) and not getattr(self, 'source', None):
	#	cls.quiet = True
	if getattr(self, 'target', None):
		cls.quiet = True
		tsk.outputs = [self.path.find_or_declare(x) for x in self.to_list(self.target)]
	if getattr(self, 'source', None):
		cls.quiet = True
		tsk.inputs = []
		for x in self.to_list(self.source):
			y = self.path.find_resource(x)
			if not y:
				raise Utils.WafError('input file %r could not be found (%r)' % (x, self.path.abspath()))
			tsk.inputs.append(y)
	if self.allnodes:
		tsk.inputs.extend(self.allnodes)
	if getattr(self, 'scan', None):
		cls.scan = self.scan
	if getattr(self, 'install_path', None):
		tsk.install_path = self.install_path
	if getattr(self, 'cwd', None):
		tsk.cwd = self.cwd
	if getattr(self, 'on_results', None):
		Task.update_outputs(cls)
	if getattr(self, 'always', None):
		Task.always_run(cls)
	# propagate the ordering constraints onto the generated task class
	for x in ['after', 'before', 'ext_in', 'ext_out']:
		setattr(cls, x, getattr(self, x, []))
# run for every task generator, before apply_core
feature('*')(exec_rule)
before('apply_core')(exec_rule)
def sequence_order(self):
	"""
	add a strict sequential constraint between the tasks generated by task generators
	it uses the fact that task generators are posted in order
	it will not post objects which belong to other folders
	there is also an awesome trick for executing the method in last position
	to use:
	bld(features='javac seq')
	bld(features='jar seq')
	to start a new sequence, set the attribute seq_start, for example:
	obj.seq_start = True
	"""
	# the "trick": re-append ourselves so we run as the last method
	if self.meths and self.meths[-1] != 'sequence_order':
		self.meths.append('sequence_order')
		return
	if getattr(self, 'seq_start', None):
		return
	# all the tasks previously declared must be run before these
	if getattr(self.bld, 'prev', None):
		self.bld.prev.post()
		for x in self.bld.prev.tasks:
			for y in self.tasks:
				y.set_run_after(x)
	self.bld.prev = self
feature('seq')(sequence_order)
ntdb-1.0/buildtools/wafadmin/Tools/ 0000775 0000000 0000000 00000000000 12241515307 0017347 5 ustar 00root root 0000000 0000000 ntdb-1.0/buildtools/wafadmin/Tools/__init__.py 0000664 0000000 0000000 00000000103 12241515307 0021452 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
ntdb-1.0/buildtools/wafadmin/Tools/ar.py 0000664 0000000 0000000 00000001535 12241515307 0020327 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2008 (ita)
# Ralf Habacker, 2006 (rh)
"ar and ranlib"
import os, sys
import Task, Utils
from Configure import conftest
# command template and task class for creating static libraries
ar_str = '${AR} ${ARFLAGS} ${AR_TGT_F}${TGT} ${AR_SRC_F}${SRC}'
cls = Task.simple_task_type('static_link', ar_str, color='YELLOW', ext_in='.o', ext_out='.bin', shell=False)
# archives are updated in place: never run two ar tasks concurrently,
# and do not install the archive through the default mechanism
cls.maxjobs = 1
cls.install = Utils.nada
# remove the output in case it already exists
old = cls.run
def wrap(self):
	"""Delete the target archive before running ar (ar updates archives
	in place, so stale members would otherwise survive)."""
	try: os.remove(self.outputs[0].abspath(self.env))
	except OSError: pass
	return old(self)
setattr(cls, 'run', wrap)
def detect(conf):
	"""Find ar and ranlib and set the default archive flags (rcs)."""
	conf.find_program('ar', var='AR')
	conf.find_program('ranlib', var='RANLIB')
	conf.env.ARFLAGS = 'rcs'
@conftest
def find_ar(conf):
	"""Configuration helper: load the ar tool and fail hard if ar is missing."""
	v = conf.env
	conf.check_tool('ar')
	if not v['AR']: conf.fatal('ar is required for static libraries - not found')
ntdb-1.0/buildtools/wafadmin/Tools/bison.py 0000664 0000000 0000000 00000002000 12241515307 0021023 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# John O'Meara, 2006
# Thomas Nagy 2009
"Bison processing"
import Task
from TaskGen import extension
bison = '${BISON} ${BISONFLAGS} ${SRC[0].abspath()} -o ${TGT[0].name}'
cls = Task.simple_task_type('bison', bison, 'GREEN', ext_in='.yc .y .yy', ext_out='.c .cxx .h .l', shell=False)
@extension(['.y', '.yc', '.yy'])
def big_bison(self, node):
	"""when it becomes complicated (unlike flex), the old recipes work better (cwd)"""
	# header generation depends on the -d flag being configured
	has_h = '-d' in self.env['BISONFLAGS']
	outs = []
	if node.name.endswith('.yc'):
		# C++ parser: bison emits .tab.cc / .tab.hh
		outs.append(node.change_ext('.tab.cc'))
		if has_h:
			outs.append(node.change_ext('.tab.hh'))
	else:
		outs.append(node.change_ext('.tab.c'))
		if has_h:
			outs.append(node.change_ext('.tab.h'))
	tsk = self.create_task('bison', node, outs)
	# run in the build directory so relative outputs land next to the node
	tsk.cwd = node.bld_dir(tsk.env)
	# and the c/cxx file must be compiled too
	self.allnodes.append(outs[0])
def detect(conf):
	"""Find bison (mandatory) and generate headers by default (-d)."""
	# the return value used to be bound to an unused local named 'bison'
	# that shadowed the module-level command template; dropped
	conf.find_program('bison', var='BISON', mandatory=True)
	conf.env['BISONFLAGS'] = '-d'
ntdb-1.0/buildtools/wafadmin/Tools/cc.py 0000664 0000000 0000000 00000005517 12241515307 0020316 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
"Base for c programs/libraries"
import os
import TaskGen, Build, Utils, Task
from Logs import debug
import ccroot
from TaskGen import feature, before, extension, after
g_cc_flag_vars = [
'CCDEPS', 'FRAMEWORK', 'FRAMEWORKPATH',
'STATICLIB', 'LIB', 'LIBPATH', 'LINKFLAGS', 'RPATH',
'CCFLAGS', 'CPPPATH', 'CPPFLAGS', 'CCDEFINES']
EXT_CC = ['.c']
g_cc_type_vars = ['CCFLAGS', 'LINKFLAGS']
# TODO remove in waf 1.6
class cc_taskgen(ccroot.ccroot_abstract):
pass
@feature('cc')
@before('apply_type_vars')
@after('default_cc')
def init_cc(self):
	"""Register the C-specific flag/type variables and verify a compiler was selected."""
	self.p_flag_vars = set(self.p_flag_vars).union(g_cc_flag_vars)
	self.p_type_vars = set(self.p_type_vars).union(g_cc_type_vars)
	if not self.env['CC_NAME']:
		raise Utils.WafError("At least one compiler (gcc, ..) must be selected")
@feature('cc')
@after('apply_incpaths')
def apply_obj_vars_cc(self):
	"""after apply_incpaths for INC_PATHS"""
	env = self.env
	app = env.append_unique
	cpppath_st = env['CPPPATH_ST']
	# local flags come first
	# set the user-defined includes paths
	for i in env['INC_PATHS']:
		# both the build and the source variant of each include dir
		app('_CCINCFLAGS', cpppath_st % i.bldpath(env))
		app('_CCINCFLAGS', cpppath_st % i.srcpath(env))
	# set the library include paths
	for i in env['CPPPATH']:
		app('_CCINCFLAGS', cpppath_st % i)
@feature('cc')
@after('apply_lib_vars')
def apply_defines_cc(self):
	"""after uselib is set for CCDEFINES"""
	self.defines = getattr(self, 'defines', [])
	lst = self.to_list(self.defines) + self.to_list(self.env['CCDEFINES'])
	milst = []
	# now process the local defines
	for defi in lst:
		if not defi in milst:
			milst.append(defi)
	# CCDEFINES_
	libs = self.to_list(self.uselib)
	for l in libs:
		val = self.env['CCDEFINES_'+l]
		if val: milst += val
	# DEFLINES keeps "NAME value" pairs for configuration headers
	self.env['DEFLINES'] = ["%s %s" % (x[0], Utils.trimquotes('='.join(x[1:]))) for x in [y.split('=') for y in milst]]
	y = self.env['CCDEFINES_ST']
	self.env.append_unique('_CCDEFFLAGS', [y%x for x in milst])
@extension(EXT_CC)
def c_hook(self, node):
	# create the compilation task: cpp or cc
	if getattr(self, 'obj_ext', None):
		obj_ext = self.obj_ext
	else:
		# default: disambiguate object names with the generator index
		obj_ext = '_%d.o' % self.idx
	task = self.create_task('cc', node, node.change_ext(obj_ext))
	try:
		self.compiled_tasks.append(task)
	except AttributeError:
		raise Utils.WafError('Have you forgotten to set the feature "cc" on %s?' % str(self))
	return task
# task classes: the compile ('cc') and link ('cc_link') command templates
cc_str = '${CC} ${CCFLAGS} ${CPPFLAGS} ${_CCINCFLAGS} ${_CCDEFFLAGS} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT}'
cls = Task.simple_task_type('cc', cc_str, 'GREEN', ext_out='.o', ext_in='.c', shell=False)
cls.scan = ccroot.scan
cls.vars.append('CCDEPS')
link_str = '${LINK_CC} ${CCLNK_SRC_F}${SRC} ${CCLNK_TGT_F}${TGT[0].abspath(env)} ${LINKFLAGS}'
cls = Task.simple_task_type('cc_link', link_str, color='YELLOW', ext_in='.o', ext_out='.bin', shell=False)
# link tasks are serialized and bypass the default install
cls.maxjobs = 1
cls.install = Utils.nada
ntdb-1.0/buildtools/wafadmin/Tools/ccroot.py 0000664 0000000 0000000 00000045067 12241515307 0021226 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"base for all c/c++ programs and libraries"
import os, sys, re
import TaskGen, Task, Utils, preproc, Logs, Build, Options
from Logs import error, debug, warn
from Utils import md5
from TaskGen import taskgen, after, before, feature
from Constants import *
from Configure import conftest
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import config_c # <- necessary for the configuration, do not touch
USE_TOP_LEVEL = False
def get_cc_version(conf, cc, gcc=False, icc=False):
	"""Run the preprocessor to dump predefined macros and derive the
	compiler version, target OS, binary format and CPU (stored in conf.env)."""
	cmd = cc + ['-dM', '-E', '-']
	try:
		p = Utils.pproc.Popen(cmd, stdin=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE)
		p.stdin.write('\n')
		out = p.communicate()[0]
	except:
		conf.fatal('could not determine the compiler version %r' % cmd)
	# PY3K: do not touch
	out = str(out)
	if gcc:
		if out.find('__INTEL_COMPILER') >= 0:
			conf.fatal('The intel compiler pretends to be gcc')
		if out.find('__GNUC__') < 0:
			conf.fatal('Could not determine the compiler type')
	if icc and out.find('__INTEL_COMPILER') < 0:
		conf.fatal('Not icc/icpc')
	k = {}
	if icc or gcc:
		out = out.split('\n')
		import shlex
		# build a macro-name -> value map from the '#define NAME VALUE' lines
		for line in out:
			lst = shlex.split(line)
			if len(lst)>2:
				key = lst[1]
				val = lst[2]
				k[key] = val
		def isD(var):
			# macro is defined
			return var in k
		def isT(var):
			# macro is defined and truthy
			return var in k and k[var] != '0'
		# Some documentation is available at http://predef.sourceforge.net
		# The names given to DEST_OS must match what Utils.unversioned_sys_platform() returns.
		mp1 = {
			'__linux__' : 'linux',
			'__GNU__' : 'gnu',
			'__FreeBSD__' : 'freebsd',
			'__NetBSD__' : 'netbsd',
			'__OpenBSD__' : 'openbsd',
			'__sun' : 'sunos',
			'__hpux' : 'hpux',
			'__sgi' : 'irix',
			'_AIX' : 'aix',
			'__CYGWIN__' : 'cygwin',
			'__MSYS__' : 'msys',
			'_UWIN' : 'uwin',
			'_WIN64' : 'win32',
			'_WIN32' : 'win32',
			'__POWERPC__' : 'powerpc',
			}
		for i in mp1:
			if isD(i):
				conf.env.DEST_OS = mp1[i]
				break
		else:
			# for-else: no macro matched, fall back to darwin/generic detection
			if isD('__APPLE__') and isD('__MACH__'):
				conf.env.DEST_OS = 'darwin'
			elif isD('__unix__'): # unix must be tested last as it's a generic fallback
				conf.env.DEST_OS = 'generic'
		if isD('__ELF__'):
			conf.env.DEST_BINFMT = 'elf'
		elif isD('__WINNT__') or isD('__CYGWIN__'):
			conf.env.DEST_BINFMT = 'pe'
		elif isD('__APPLE__'):
			conf.env.DEST_BINFMT = 'mac-o'
		mp2 = {
			'__x86_64__' : 'x86_64',
			'__i386__' : 'x86',
			'__ia64__' : 'ia',
			'__mips__' : 'mips',
			'__sparc__' : 'sparc',
			'__alpha__' : 'alpha',
			'__arm__' : 'arm',
			'__hppa__' : 'hppa',
			'__powerpc__' : 'powerpc',
			}
		for i in mp2:
			if isD(i):
				conf.env.DEST_CPU = mp2[i]
				break
		debug('ccroot: dest platform: ' + ' '.join([conf.env[x] or '?' for x in ('DEST_OS', 'DEST_BINFMT', 'DEST_CPU')]))
		conf.env['CC_VERSION'] = (k['__GNUC__'], k['__GNUC_MINOR__'], k['__GNUC_PATCHLEVEL__'])
	return k
class DEBUG_LEVELS:
	"""Symbolic build-variant names. Will disappear in waf 1.6"""
	ULTRADEBUG = "ultradebug"
	DEBUG = "debug"
	RELEASE = "release"
	OPTIMIZED = "optimized"
	CUSTOM = "custom"
	# all known levels, in order
	ALL = [ULTRADEBUG, DEBUG, RELEASE, OPTIMIZED, CUSTOM]
def scan(self):
	"look for .h the .cpp need"
	debug('ccroot: _scan_preprocessor(self, node, env, path_lst)')
	# TODO waf 1.6 - assume the default input has exactly one file
	if len(self.inputs) == 1:
		# fast path: single input, no deduplication needed
		node = self.inputs[0]
		(nodes, names) = preproc.get_deps(node, self.env, nodepaths = self.env['INC_PATHS'])
		if Logs.verbose:
			debug('deps: deps for %s: %r; unresolved %r', str(node), nodes, names)
		return (nodes, names)
	all_nodes = []
	all_names = []
	seen = set()
	for node in self.inputs:
		(nodes, names) = preproc.get_deps(node, self.env, nodepaths = self.env['INC_PATHS'])
		if Logs.verbose:
			debug('deps: deps for %s: %r; unresolved %r', str(node), nodes, names)
		# deduplicate nodes by identity across all inputs
		for x in nodes:
			if id(x) in seen: continue
			seen.add(id(x))
			all_nodes.append(x)
		for x in names:
			if not x in all_names:
				all_names.append(x)
	return (all_nodes, all_names)
class ccroot_abstract(TaskGen.task_gen):
	"Parent class for programs and libraries in languages c, c++ and moc (Qt)"
	def __init__(self, *k, **kw):
		# COMPAT remove in waf 1.6 TODO
		# old-style positional type such as 'shlib' gets the 'c' prefix -> 'cshlib'
		if len(k) > 1:
			k = list(k)
			if k[1][0] != 'c':
				k[1] = 'c' + k[1]
		TaskGen.task_gen.__init__(self, *k, **kw)
def get_target_name(self):
	"""Compute the platform-specific file name for the link output
	(program/shlib/staticlib pattern, dll versioning on PE)."""
	tp = 'program'
	for x in self.features:
		if x in ['cshlib', 'cstaticlib']:
			tp = x.lstrip('c')
	pattern = self.env[tp + '_PATTERN']
	if not pattern: pattern = '%s'
	dir, name = os.path.split(self.target)
	if self.env.DEST_BINFMT == 'pe' and getattr(self, 'vnum', None) and 'cshlib' in self.features:
		# include the version in the dll file name,
		# the import lib file name stays unversionned.
		name = name + '-' + self.vnum.split('.')[0]
	return os.path.join(dir, pattern % name)
@feature('cc', 'cxx')
@before('apply_core')
def default_cc(self):
	"""compiled_tasks attribute must be set before the '.c->.o' tasks can be created"""
	Utils.def_attrs(self,
		includes = '',
		defines= '',
		rpaths = '',
		uselib = '',
		uselib_local = '',
		add_objects = '',
		p_flag_vars = [],
		p_type_vars = [],
		compiled_tasks = [],
		link_task = None)
	# The only thing we need for cross-compilation is DEST_BINFMT.
	# At some point, we may reach a case where DEST_BINFMT is not enough, but for now it's sufficient.
	# Currently, cross-compilation is auto-detected only for the gnu and intel compilers.
	if not self.env.DEST_BINFMT:
		# Infer the binary format from the os name.
		self.env.DEST_BINFMT = Utils.unversioned_sys_platform_to_binary_format(
			self.env.DEST_OS or Utils.unversioned_sys_platform())
	if not self.env.BINDIR: self.env.BINDIR = Utils.subst_vars('${PREFIX}/bin', self.env)
	if not self.env.LIBDIR: self.env.LIBDIR = Utils.subst_vars('${PREFIX}/lib${LIB_EXT}', self.env)
@feature('cprogram', 'dprogram', 'cstaticlib', 'dstaticlib', 'cshlib', 'dshlib')
def apply_verif(self):
	"""no particular order, used for diagnostic"""
	# a link target needs at least one of: sources, object files, or local libs
	if not (self.source or getattr(self, 'add_objects', None) or getattr(self, 'uselib_local', None) or getattr(self, 'obj_files', None)):
		raise Utils.WafError('no source files specified for %s' % self)
	if not self.target:
		raise Utils.WafError('no target for %s' % self)
# TODO reference the d programs, shlibs in d.py, not here
@feature('cprogram', 'dprogram')
@after('default_cc')
@before('apply_core')
def vars_target_cprogram(self):
	"""Programs install into BINDIR with executable permissions."""
	self.default_install_path = self.env.BINDIR
	self.default_chmod = O755
@after('default_cc')
@feature('cshlib', 'dshlib')
@before('apply_core')
def vars_target_cshlib(self):
	"""Shared libraries install into LIBDIR (BINDIR with exec bit on PE platforms)."""
	if self.env.DEST_BINFMT == 'pe':
		# set execute bit on libs to avoid 'permission denied' (issue 283)
		self.default_chmod = O755
		self.default_install_path = self.env.BINDIR
	else:
		self.default_install_path = self.env.LIBDIR
@feature('cprogram', 'dprogram', 'cstaticlib', 'dstaticlib', 'cshlib', 'dshlib')
@after('apply_link', 'vars_target_cprogram', 'vars_target_cshlib')
def default_link_install(self):
	"""you may kill this method to inject your own installation for the first element
	any other install should only process its own nodes and not those from the others"""
	if self.install_path:
		self.bld.install_files(self.install_path, self.link_task.outputs[0], env=self.env, chmod=self.chmod)
@feature('cc', 'cxx')
@after('apply_type_vars', 'apply_lib_vars', 'apply_core')
def apply_incpaths(self):
	"""used by the scanner
	after processing the uselib for CPPPATH
	after apply_core because some processing may add include paths
	"""
	lst = []
	# TODO move the uselib processing out of here
	for lib in self.to_list(self.uselib):
		for path in self.env['CPPPATH_' + lib]:
			if not path in lst:
				lst.append(path)
	if preproc.go_absolute:
		for path in preproc.standard_includes:
			if not path in lst:
				lst.append(path)
	for path in self.to_list(self.includes):
		if not path in lst:
			if preproc.go_absolute or not os.path.isabs(path):
				lst.append(path)
			else:
				# absolute system path: pass it straight to the compiler
				self.env.prepend_value('CPPPATH', path)
	# turn the collected paths into nodes for the dependency scanner
	for path in lst:
		node = None
		if os.path.isabs(path):
			if preproc.go_absolute:
				node = self.bld.root.find_dir(path)
		elif path[0] == '#':
			# '#'-prefixed paths are relative to the project top
			node = self.bld.srcnode
			if len(path) > 1:
				node = node.find_dir(path[1:])
		else:
			node = self.path.find_dir(path)
		if node:
			self.env.append_value('INC_PATHS', node)
	# TODO WAF 1.6
	if USE_TOP_LEVEL:
		self.env.append_value('INC_PATHS', self.bld.srcnode)
@feature('cc', 'cxx')
@after('init_cc', 'init_cxx')
@before('apply_lib_vars')
def apply_type_vars(self):
	"""before apply_lib_vars because we modify uselib
	after init_cc and init_cxx because we need p_type_vars
	"""
	for x in self.features:
		if not x in ['cprogram', 'cstaticlib', 'cshlib']:
			continue
		x = x.lstrip('c')
		# if the type defines uselib to add, add them
		st = self.env[x + '_USELIB']
		if st: self.uselib = self.uselib + ' ' + st
		# each compiler defines variables like 'shlib_CXXFLAGS', 'shlib_LINKFLAGS', etc
		# so when we make a task generator of the type shlib, CXXFLAGS are modified accordingly
		for var in self.p_type_vars:
			compvar = '%s_%s' % (x, var)
			#print compvar
			value = self.env[compvar]
			if value: self.env.append_value(var, value)
@feature('cprogram', 'cshlib', 'cstaticlib')
@after('apply_core')
def apply_link(self):
	"""executes after apply_core for collecting 'compiled_tasks'
	use a custom linker if specified (self.link='name-of-custom-link-task')"""
	link = getattr(self, 'link', None)
	if not link:
		# pick the link task type from the features
		if 'cstaticlib' in self.features: link = 'static_link'
		elif 'cxx' in self.features: link = 'cxx_link'
		else: link = 'cc_link'
	tsk = self.create_task(link)
	# all the object files produced by the compile tasks feed the linker
	outputs = [t.outputs[0] for t in self.compiled_tasks]
	tsk.set_inputs(outputs)
	tsk.set_outputs(self.path.find_or_declare(get_target_name(self)))
	self.link_task = tsk
@feature('cc', 'cxx')
@after('apply_link', 'init_cc', 'init_cxx', 'apply_core')
def apply_lib_vars(self):
	"""after apply_link because of 'link_task'
	after default_cc because of the attribute 'uselib'"""
	# after 'apply_core' in case if 'cc' if there is no link
	env = self.env
	# 1. the case of the libs defined in the project (visit ancestors first)
	# the ancestors external libraries (uselib) will be prepended
	self.uselib = self.to_list(self.uselib)
	names = self.to_list(self.uselib_local)
	seen = set([])
	tmp = Utils.deque(names) # consume a copy of the list of names
	while tmp:
		lib_name = tmp.popleft()
		# visit dependencies only once
		if lib_name in seen:
			continue
		y = self.name_to_obj(lib_name)
		if not y:
			raise Utils.WafError('object %r was not found in uselib_local (required by %r)' % (lib_name, self.name))
		y.post()
		seen.add(lib_name)
		# object has ancestors to process (shared libraries): add them to the end of the list
		if getattr(y, 'uselib_local', None):
			lst = y.to_list(y.uselib_local)
			if 'cshlib' in y.features or 'cprogram' in y.features:
				# static libs of shared libs/programs are not propagated
				lst = [x for x in lst if not 'cstaticlib' in self.name_to_obj(x).features]
			tmp.extend(lst)
		# link task and flags
		if getattr(y, 'link_task', None):
			link_name = y.target[y.target.rfind(os.sep) + 1:]
			if 'cstaticlib' in y.features:
				env.append_value('STATICLIB', link_name)
			elif 'cshlib' in y.features or 'cprogram' in y.features:
				# WARNING some linkers can link against programs
				env.append_value('LIB', link_name)
			# the order
			self.link_task.set_run_after(y.link_task)
			# for the recompilation
			dep_nodes = getattr(self.link_task, 'dep_nodes', [])
			self.link_task.dep_nodes = dep_nodes + y.link_task.outputs
			# add the link path too
			tmp_path = y.link_task.outputs[0].parent.bldpath(self.env)
			if not tmp_path in env['LIBPATH']: env.prepend_value('LIBPATH', tmp_path)
		# add ancestors uselib too - but only propagate those that have no staticlib
		for v in self.to_list(y.uselib):
			if not env['STATICLIB_' + v]:
				if not v in self.uselib:
					self.uselib.insert(0, v)
		# if the library task generator provides 'export_incdirs', add to the include path
		# the export_incdirs must be a list of paths relative to the other library
		if getattr(y, 'export_incdirs', None):
			for x in self.to_list(y.export_incdirs):
				node = y.path.find_dir(x)
				if not node:
					raise Utils.WafError('object %r: invalid folder %r in export_incdirs' % (y.target, x))
				self.env.append_unique('INC_PATHS', node)
	# 2. the case of the libs defined outside
	for x in self.uselib:
		for v in self.p_flag_vars:
			val = self.env[v + '_' + x]
			if val: self.env.append_value(v, val)
@feature('cprogram', 'cstaticlib', 'cshlib')
@after('init_cc', 'init_cxx', 'apply_link')
def apply_objdeps(self):
	"add the .o files produced by some other object files in the same manner as uselib_local"
	if not getattr(self, 'add_objects', None): return
	seen = []
	names = self.to_list(self.add_objects)
	while names:
		x = names[0]
		# visit dependencies only once
		if x in seen:
			names = names[1:]
			continue
		# object does not exist ?
		y = self.name_to_obj(x)
		if not y:
			raise Utils.WafError('object %r was not found in uselib_local (required by add_objects %r)' % (x, self.name))
		# object has ancestors to process first ? update the list of names
		if getattr(y, 'add_objects', None):
			added = 0
			lst = y.to_list(y.add_objects)
			lst.reverse()
			for u in lst:
				if u in seen: continue
				added = 1
				# prepend so ancestors are handled before the current object
				names = [u]+names
			if added: continue # list of names modified, loop
		# safe to process the current object
		y.post()
		seen.append(x)
		for t in y.compiled_tasks:
			self.link_task.inputs.extend(t.outputs)
@feature('cprogram', 'cshlib', 'cstaticlib')
@after('apply_lib_vars')
def apply_obj_vars(self):
	"""after apply_lib_vars for uselib"""
	v = self.env
	lib_st = v['LIB_ST']
	staticlib_st = v['STATICLIB_ST']
	libpath_st = v['LIBPATH_ST']
	staticlibpath_st = v['STATICLIBPATH_ST']
	rpath_st = v['RPATH_ST']
	app = v.append_unique
	if v['FULLSTATIC']:
		v.append_value('LINKFLAGS', v['FULLSTATIC_MARKER'])
	for i in v['RPATH']:
		if i and rpath_st:
			app('LINKFLAGS', rpath_st % i)
	for i in v['LIBPATH']:
		app('LINKFLAGS', libpath_st % i)
		app('LINKFLAGS', staticlibpath_st % i)
	if v['STATICLIB']:
		v.append_value('LINKFLAGS', v['STATICLIB_MARKER'])
		k = [(staticlib_st % i) for i in v['STATICLIB']]
		app('LINKFLAGS', k)
	# fully static binaries ?
	if not v['FULLSTATIC']:
		if v['STATICLIB'] or v['LIB']:
			v.append_value('LINKFLAGS', v['SHLIB_MARKER'])
	app('LINKFLAGS', [lib_st % i for i in v['LIB']])
@after('apply_link')
def process_obj_files(self):
	"""Feed the extra object files registered by add_obj_file to the linker."""
	if not hasattr(self, 'obj_files'): return
	for x in self.obj_files:
		node = self.path.find_resource(x)
		self.link_task.inputs.append(node)
@taskgen
def add_obj_file(self, file):
	"""Small example on how to link object files as if they were source
	obj = bld.create_obj('cc')
	obj.add_obj_file('foo.o')"""
	if not hasattr(self, 'obj_files'): self.obj_files = []
	# lazily schedule the method that feeds these files to the link task
	if not 'process_obj_files' in self.meths: self.meths.append('process_obj_files')
	self.obj_files.append(file)
# mapping of convenience task-generator attributes (matched singular or
# plural, case-insensitively by add_extra_flags) to the environment
# variables they feed
c_attrs = {
	'cxxflag' : 'CXXFLAGS',
	'cflag' : 'CCFLAGS',
	'ccflag' : 'CCFLAGS',
	'linkflag' : 'LINKFLAGS',
	'ldflag' : 'LINKFLAGS',
	'lib' : 'LIB',
	'libpath' : 'LIBPATH',
	'staticlib': 'STATICLIB',
	'staticlibpath': 'STATICLIBPATH',
	'rpath' : 'RPATH',
	'framework' : 'FRAMEWORK',
	'frameworkpath' : 'FRAMEWORKPATH'
	}
@feature('cc', 'cxx')
@before('init_cxx', 'init_cc', 'apply_lib_vars', 'apply_obj_vars', 'apply_incpaths')
def add_extra_flags(self):
	"""case and plural insensitive
	before apply_obj_vars for processing the library attributes

	The two stacked @before decorators (which listed 'init_cc' twice)
	were merged into one equivalent decorator.
	"""
	for x in self.__dict__.keys():
		y = x.lower()
		# strip a trailing 's' so 'cflags' matches 'cflag'
		if y[-1] == 's':
			y = y[:-1]
		if c_attrs.get(y, None):
			self.env.append_unique(c_attrs[y], getattr(self, x))
# ============ the code above must not know anything about import libs ==========
@feature('cshlib')
@after('apply_link', 'default_cc')
@before('apply_lib_vars', 'apply_objdeps', 'default_link_install')
def apply_implib(self):
	"""On mswindows, handle dlls and their import libs
	the .dll.a is the import lib and it is required for linking so it is installed too
	"""
	if not self.env.DEST_BINFMT == 'pe':
		return
	# this method replaces the default installation entirely
	self.meths.remove('default_link_install')
	bindir = self.install_path
	if not bindir: return
	# install the dll in the bin dir
	dll = self.link_task.outputs[0]
	self.bld.install_files(bindir, dll, self.env, self.chmod)
	# add linker flags to generate the import lib
	implib = self.env['implib_PATTERN'] % os.path.split(self.target)[1]
	implib = dll.parent.find_or_declare(implib)
	self.link_task.outputs.append(implib)
	self.bld.install_as('${LIBDIR}/%s' % implib.name, implib, self.env)
	self.env.append_value('LINKFLAGS', (self.env['IMPLIB_ST'] % implib.bldpath(self.env)).split())
# ============ the code above must not know anything about vnum processing on unix platforms =========
@feature('cshlib')
@after('apply_link')
@before('apply_lib_vars', 'default_link_install')
def apply_vnum(self):
	"""
	libfoo.so is installed as libfoo.so.1.2.3
	"""
	# only for versioned shared libs on posix elf/mach-o systems
	if not getattr(self, 'vnum', '') or not 'cshlib' in self.features or os.name != 'posix' or self.env.DEST_BINFMT not in ('elf', 'mac-o'):
		return
	# this method replaces the default installation
	self.meths.remove('default_link_install')
	link = self.link_task
	nums = self.vnum.split('.')
	node = link.outputs[0]
	libname = node.name
	# name3 = fully-versioned name, name2 = major-version (soname) name
	if libname.endswith('.dylib'):
		name3 = libname.replace('.dylib', '.%s.dylib' % self.vnum)
		name2 = libname.replace('.dylib', '.%s.dylib' % nums[0])
	else:
		name3 = libname + '.' + self.vnum
		name2 = libname + '.' + nums[0]
	if self.env.SONAME_ST:
		v = self.env.SONAME_ST % name2
		self.env.append_value('LINKFLAGS', v.split())
	bld = self.bld
	nums = self.vnum.split('.')
	path = self.install_path
	if not path: return
	if self.env.DEST_OS == 'openbsd':
		# openbsd installs only the major-versioned file, no symlinks
		bld.install_as(path + os.sep + name2, node, env=self.env, chmod=self.link_task.chmod)
	else:
		bld.install_as(path + os.sep + name3, node, env=self.env)
		bld.symlink_as(path + os.sep + name2, name3)
		bld.symlink_as(path + os.sep + libname, name3)
	# the following task is just to enable execution from the build dir :-/
	self.create_task('vnum', node, [node.parent.find_or_declare(name2), node.parent.find_or_declare(name3)])
def exec_vnum_link(self):
	"""Create the version symlinks in the build directory (remove stale
	ones first); returns 1 on failure per the waf task convention."""
	for x in self.outputs:
		path = x.abspath(self.env)
		try:
			os.remove(path)
		except OSError:
			pass
		try:
			os.symlink(self.inputs[0].name, path)
		except OSError:
			return 1
cls = Task.task_type_from_func('vnum', func=exec_vnum_link, ext_in='.bin', color='CYAN')
cls.quiet = 1
# ============ the --as-needed flag should be added during the configuration, not at runtime =========
@conftest
def add_as_needed(conf):
	"""Enable --as-needed linking for gcc on ELF platforms.

	The option must be wrapped for the compiler driver: gcc does not
	accept a bare '--as-needed', it has to be passed to the linker as
	'-Wl,--as-needed'.
	"""
	if conf.env.DEST_BINFMT == 'elf' and 'gcc' in (conf.env.CXX_NAME, conf.env.CC_NAME):
		conf.env.append_unique('LINKFLAGS', '-Wl,--as-needed')
ntdb-1.0/buildtools/wafadmin/Tools/compiler_cc.py 0000664 0000000 0000000 00000004074 12241515307 0022205 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Matthias Jahn jahn dôt matthias ât freenet dôt de, 2007 (pmarat)
import os, sys, imp, types, ccroot
import optparse
import Utils, Configure, Options
from Logs import debug
# candidate C compilers to probe, per build platform
c_compiler = {
	'win32': ['msvc', 'gcc'],
	'cygwin': ['gcc'],
	'darwin': ['gcc'],
	'aix': ['xlc', 'gcc'],
	'linux': ['gcc', 'icc', 'suncc'],
	'sunos': ['gcc', 'suncc'],
	'irix': ['gcc'],
	'hpux': ['gcc'],
	'gnu': ['gcc'],
	'default': ['gcc']
	}
def __list_possible_compiler(platform):
	"""Return the ordered candidate C compilers for *platform*,
	falling back to the 'default' entry for unknown platforms."""
	return c_compiler.get(platform, c_compiler['default'])
def detect(conf):
	"""
	for each compiler for the platform, try to configure the compiler
	in theory the tools should raise a configuration error if the compiler
	pretends to be something it is not (setting CC=icc and trying to configure gcc)
	"""
	try: test_for_compiler = Options.options.check_c_compiler
	except AttributeError: conf.fatal("Add set_options(opt): opt.tool_options('compiler_cc')")
	orig = conf.env
	for compiler in test_for_compiler.split():
		# each probe runs against a fresh copy of the environment
		conf.env = orig.copy()
		try:
			conf.check_tool(compiler)
		except Configure.ConfigurationError, e:
			debug('compiler_cc: %r' % e)
		else:
			if conf.env['CC']:
				# keep the settings found by this compiler probe
				orig.table = conf.env.get_merged_dict()
				conf.env = orig
				conf.check_message(compiler, '', True)
				conf.env['COMPILER_CC'] = compiler
				break
			conf.check_message(compiler, '', False)
			break
	else:
		# for-else: every candidate failed
		conf.fatal('could not configure a c compiler!')
def set_options(opt):
	"""Register --check-c-compiler with the platform's default compiler list."""
	build_platform = Utils.unversioned_sys_platform()
	test_for_compiler = ' '.join(__list_possible_compiler(build_platform))
	grp = opt.add_option_group("C Compiler Options")
	grp.add_option('--check-c-compiler',
		default="%s" % test_for_compiler,
		help='On this platform (%s) the following C-Compiler will be checked by default: "%s"' % (build_platform, test_for_compiler),
		dest="check_c_compiler")
	# pull in the per-compiler option groups as well
	for name in test_for_compiler.split():
		opt.tool_options('%s' % name, option_group=grp)
ntdb-1.0/buildtools/wafadmin/Tools/compiler_cxx.py 0000664 0000000 0000000 00000003606 12241515307 0022422 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Matthias Jahn jahn dôt matthias ât freenet dôt de 2007 (pmarat)
import os, sys, imp, types, ccroot
import optparse
import Utils, Configure, Options
from Logs import debug
# candidate C++ compilers per platform, in order of preference
cxx_compiler = {
	'win32': ['msvc', 'g++'],
	'cygwin': ['g++'],
	'darwin': ['g++'],
	'aix': ['xlc++', 'g++'],
	'linux': ['g++', 'icpc', 'sunc++'],
	'sunos': ['g++', 'sunc++'],
	'irix': ['g++'],
	'hpux': ['g++'],
	'gnu': ['g++'],
	'default': ['g++']
}

def __list_possible_compiler(platform):
	"""Return the candidate C++ compilers for *platform*, falling back to 'default'."""
	return cxx_compiler.get(platform, cxx_compiler['default'])
def detect(conf):
	# probe each candidate C++ compiler until one yields a working CXX
	try: test_for_compiler = Options.options.check_cxx_compiler
	except AttributeError: raise Configure.ConfigurationError("Add set_options(opt): opt.tool_options('compiler_cxx')")
	orig = conf.env
	for compiler in test_for_compiler.split():
		try:
			# probe against a fresh copy of the environment
			conf.env = orig.copy()
			conf.check_tool(compiler)
		except Configure.ConfigurationError, e:
			debug('compiler_cxx: %r' % e)
		else:
			if conf.env['CXX']:
				# success: merge the probed settings back into the original env
				orig.table = conf.env.get_merged_dict()
				conf.env = orig
				conf.check_message(compiler, '', True)
				conf.env['COMPILER_CXX'] = compiler
				break
			# tool loaded but produced no CXX: report failure and stop probing
			conf.check_message(compiler, '', False)
			break
	else:
		# for/else: no candidate broke out of the loop
		conf.fatal('could not configure a cxx compiler!')
def set_options(opt):
	"""Register --check-cxx-compiler with the platform's default compiler list."""
	build_platform = Utils.unversioned_sys_platform()
	test_for_compiler = ' '.join(__list_possible_compiler(build_platform))
	grp = opt.add_option_group('C++ Compiler Options')
	grp.add_option('--check-cxx-compiler',
		default="%s" % test_for_compiler,
		help='On this platform (%s) the following C++ Compiler will be checked by default: "%s"' % (build_platform, test_for_compiler),
		dest="check_cxx_compiler")
	# pull in the per-compiler option groups as well
	for name in test_for_compiler.split():
		opt.tool_options('%s' % name, option_group=grp)
ntdb-1.0/buildtools/wafadmin/Tools/compiler_d.py 0000664 0000000 0000000 00000001515 12241515307 0022040 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2007 (dv)
import os, sys, imp, types
import Utils, Configure, Options
def detect(conf):
	"""Probe the candidate D compilers in preference order.

	--check-dmd-first flips the default gdc-before-dmd order.
	Fails the configuration when neither compiler can be set up."""
	if getattr(Options.options, 'check_dmd_first', None):
		test_for_compiler = ['dmd', 'gdc']
	else:
		test_for_compiler = ['gdc', 'dmd']
	for d_compiler in test_for_compiler:
		try:
			conf.check_tool(d_compiler)
		# bugfix: the original bare "except:" also swallowed KeyboardInterrupt
		# and SystemExit; Exception keeps the best-effort probing behaviour
		except Exception:
			pass
		else:
			break
	else:
		# for/else: no candidate broke out of the loop
		conf.fatal('no suitable d compiler was found')
def set_options(opt):
	"""Register --check-dmd-first and the per-compiler option groups."""
	grp = opt.add_option_group('D Compiler Options')
	grp.add_option('--check-dmd-first', action='store_true',
		help='checks for the gdc compiler before dmd (default is the other way round)',
		dest='check_dmd_first',
		default=False)
	for name in ('gdc', 'dmd'):
		opt.tool_options('%s' % name, option_group=grp)
ntdb-1.0/buildtools/wafadmin/Tools/config_c.py 0000664 0000000 0000000 00000046351 12241515307 0021501 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"""
c/c++ configuration routines
"""
import os, imp, sys, shlex, shutil
from Utils import md5
import Build, Utils, Configure, Task, Options, Logs, TaskGen
from Constants import *
from Configure import conf, conftest
# map of check_cfg version keywords to the comparison operator shown to the user
cfg_ver = {
	'atleast-version': '>=',
	'exact-version': '==',
	'max-version': '<=',
}
# SNIP1: does a function named %s exist? (taking its address must compile)
SNIP1 = '''
int main() {
void *p;
p=(void*)(%s);
return 0;
}
'''
# SNIP2: does the type %(type_name)s exist?
SNIP2 = '''
int main() {
if ((%(type_name)s *) 0) return 0;
if (sizeof (%(type_name)s)) return 0;
}
'''
# SNIP3: minimal program, used for header/library/flag checks
SNIP3 = '''
int main() {
return 0;
}
'''
def parse_flags(line, uselib, env):
	"""pkg-config still has bugs on some platforms, and there are many -config programs, parsing flags is necessary :-/"""
	# split the -config/pkg-config output and distribute each flag into the
	# environment variables suffixed with the uselib name (LIB_X, CPPPATH_X, ...)
	lst = shlex.split(line)
	while lst:
		x = lst.pop(0)
		st = x[:2]
		ot = x[2:]
		app = env.append_value
		if st == '-I' or st == '/I':
			# include path; the argument may be a separate token
			if not ot: ot = lst.pop(0)
			app('CPPPATH_' + uselib, ot)
		elif st == '-D':
			# preprocessor define, applies to both C and C++
			if not ot: ot = lst.pop(0)
			app('CXXDEFINES_' + uselib, ot)
			app('CCDEFINES_' + uselib, ot)
		elif st == '-l':
			if not ot: ot = lst.pop(0)
			app('LIB_' + uselib, ot)
		elif st == '-L':
			if not ot: ot = lst.pop(0)
			app('LIBPATH_' + uselib, ot)
		elif x == '-pthread' or x.startswith('+'):
			# flags needed at compile AND link time
			app('CCFLAGS_' + uselib, x)
			app('CXXFLAGS_' + uselib, x)
			app('LINKFLAGS_' + uselib, x)
		elif x == '-framework':
			# OSX framework: the name is the next token
			app('FRAMEWORK_' + uselib, lst.pop(0))
		elif x.startswith('-F'):
			app('FRAMEWORKPATH_' + uselib, x[2:])
		elif x.startswith('-std'):
			app('CCFLAGS_' + uselib, x)
			app('CXXFLAGS_' + uselib, x)
			app('LINKFLAGS_' + uselib, x)
		#
		# NOTE on special treatment of -Wl,-R and -Wl,-rpath:
		#
		# It is important to not put a library provided RPATH
		# into the LINKFLAGS but in the RPATH instead, since
		# the provided LINKFLAGS get prepended to our own internal
		# RPATH later, and hence can potentially lead to linking
		# in too old versions of our internal libs.
		#
		elif x.startswith('-Wl,-R'):
			app('RPATH_' + uselib, x[6:])
		elif x.startswith('-Wl,-rpath,'):
			app('RPATH_' + uselib, x[11:])
		elif x.startswith('-Wl'):
			app('LINKFLAGS_' + uselib, x)
		elif x.startswith('-m') or x.startswith('-f'):
			# machine/feature flags: compile-time only
			app('CCFLAGS_' + uselib, x)
			app('CXXFLAGS_' + uselib, x)
@conf
def ret_msg(self, f, kw):
	"""Return *f* unchanged when it is a string; otherwise call it with *kw*."""
	return f if isinstance(f, str) else f(kw)
@conf
def validate_cfg(self, kw):
	# normalize the keyword arguments of check_cfg: pick a default command
	# and synthesize the progress message for each kind of query
	if not 'path' in kw:
		kw['path'] = 'pkg-config --errors-to-stdout --print-errors'
	# pkg-config version
	if 'atleast_pkgconfig_version' in kw:
		if not 'msg' in kw:
			kw['msg'] = 'Checking for pkg-config version >= %s' % kw['atleast_pkgconfig_version']
		return
	# pkg-config --modversion
	if 'modversion' in kw:
		return
	if 'variables' in kw:
		if not 'msg' in kw:
			kw['msg'] = 'Checking for %s variables' % kw['package']
		return
	# checking for the version of a module, for the moment, one thing at a time
	for x in cfg_ver.keys():
		y = x.replace('-', '_')
		if y in kw:
			if not 'package' in kw:
				raise ValueError('%s requires a package' % x)
			if not 'msg' in kw:
				kw['msg'] = 'Checking for %s %s %s' % (kw['package'], cfg_ver[x], kw[y])
			return
	# plain package/flags query: fill in default messages
	if not 'msg' in kw:
		kw['msg'] = 'Checking for %s' % (kw['package'] or kw['path'])
	if not 'okmsg' in kw:
		kw['okmsg'] = 'yes'
	if not 'errmsg' in kw:
		kw['errmsg'] = 'not found'
@conf
def cmd_and_log(self, cmd, kw):
Logs.debug('runner: %s\n' % cmd)
if self.log:
self.log.write('%s\n' % cmd)
try:
p = Utils.pproc.Popen(cmd, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE, shell=True)
(out, err) = p.communicate()
except OSError, e:
self.log.write('error %r' % e)
self.fatal(str(e))
# placeholder, don't touch
out = str(out)
err = str(err)
if self.log:
self.log.write(out)
self.log.write(err)
if p.returncode:
if not kw.get('errmsg', ''):
if kw.get('mandatory', False):
kw['errmsg'] = out.strip()
else:
kw['errmsg'] = 'no'
self.fatal('fail')
return out
@conf
def exec_cfg(self, kw):
	# execute the pkg-config (or -config tool) query described by kw;
	# kw was normalized by validate_cfg beforehand
	# pkg-config version
	if 'atleast_pkgconfig_version' in kw:
		cmd = '%s --atleast-pkgconfig-version=%s' % (kw['path'], kw['atleast_pkgconfig_version'])
		self.cmd_and_log(cmd, kw)
		if not 'okmsg' in kw:
			kw['okmsg'] = 'yes'
		return
	# checking for the version of a module
	for x in cfg_ver:
		y = x.replace('-', '_')
		if y in kw:
			self.cmd_and_log('%s --%s=%s %s' % (kw['path'], x, kw[y], kw['package']), kw)
			if not 'okmsg' in kw:
				kw['okmsg'] = 'yes'
			# record HAVE_<PKG> on success (cmd_and_log aborts on failure)
			self.define(self.have_define(kw.get('uselib_store', kw['package'])), 1, 0)
			break
	# retrieving the version of a module
	if 'modversion' in kw:
		version = self.cmd_and_log('%s --modversion %s' % (kw['path'], kw['modversion']), kw).strip()
		self.define('%s_VERSION' % Utils.quote_define_name(kw.get('uselib_store', kw['modversion'])), version)
		return version
	# retrieving variables of a module
	if 'variables' in kw:
		env = kw.get('env', self.env)
		uselib = kw.get('uselib_store', kw['package'].upper())
		vars = Utils.to_list(kw['variables'])
		for v in vars:
			val = self.cmd_and_log('%s --variable=%s %s' % (kw['path'], v, kw['package']), kw).strip()
			var = '%s_%s' % (uselib, v)
			env[var] = val
		if not 'okmsg' in kw:
			kw['okmsg'] = 'yes'
		return
	# default query: build the command line piece by piece
	lst = [kw['path']]
	defi = kw.get('define_variable', None)
	if not defi:
		defi = self.env.PKG_CONFIG_DEFINES or {}
	for key, val in defi.iteritems():
		lst.append('--define-variable=%s=%s' % (key, val))
	lst.append(kw.get('args', ''))
	lst.append(kw['package'])
	# so we assume the command-line will output flags to be parsed afterwards
	cmd = ' '.join(lst)
	ret = self.cmd_and_log(cmd, kw)
	if not 'okmsg' in kw:
		kw['okmsg'] = 'yes'
	self.define(self.have_define(kw.get('uselib_store', kw['package'])), 1, 0)
	# distribute the returned flags into the uselib variables
	parse_flags(ret, kw.get('uselib_store', kw['package'].upper()), kw.get('env', self.env))
	return ret
@conf
def check_cfg(self, *k, **kw):
	"""
	for pkg-config mostly, but also all the -config tools
	conf.check_cfg(path='mpicc', args='--showme:compile --showme:link', package='', uselib_store='OPEN_MPI')
	conf.check_cfg(package='dbus-1', variables='system_bus_default_address session_bus_services_dir')
	"""
	self.validate_cfg(kw)
	if 'msg' in kw:
		self.check_message_1(kw['msg'])
	ret = None
	try:
		ret = self.exec_cfg(kw)
	except Configure.ConfigurationError, e:
		if 'errmsg' in kw:
			self.check_message_2(kw['errmsg'], 'YELLOW')
		if 'mandatory' in kw and kw['mandatory']:
			# verbose mode re-raises for a full traceback; otherwise abort cleanly
			if Logs.verbose > 1:
				raise
			else:
				self.fatal('the configuration failed (see %r)' % self.log.name)
	else:
		kw['success'] = ret
		if 'okmsg' in kw:
			self.check_message_2(self.ret_msg(kw['okmsg'], kw))
	return ret
# the idea is the following: now that we are certain
# that all the code here is only for c or c++, it is
# easy to put all the logic in one function
#
# this should prevent code duplication (ita)
# env: an optional environment (modified -> provide a copy)
# compiler: cc or cxx - it tries to guess what is best
# type: cprogram, cshlib, cstaticlib
# code: a c code to execute
# uselib_store: where to add the variables
# uselib: parameters to use for building
# define: define to set, like FOO in #define FOO, if not set, add /* #undef FOO */
# execute: True or False - will return the result of the execution
@conf
def validate_c(self, kw):
	"""validate the parameters for the test method"""
	# normalize the keyword arguments of conf.check: choose compiler/mode,
	# synthesize the test code fragment and the messages
	if not 'env' in kw:
		kw['env'] = self.env.copy()
	env = kw['env']
	if not 'compiler' in kw:
		kw['compiler'] = 'cc'
		# prefer c++ when a c++ compiler was configured and the cxx tool is loaded
		if env['CXX_NAME'] and Task.TaskBase.classes.get('cxx', None):
			kw['compiler'] = 'cxx'
			if not self.env['CXX']:
				self.fatal('a c++ compiler is required')
		else:
			if not self.env['CC']:
				self.fatal('a c compiler is required')
	if not 'type' in kw:
		kw['type'] = 'cprogram'
	assert not(kw['type'] != 'cprogram' and kw.get('execute', 0)), 'can only execute programs'
	#if kw['type'] != 'program' and kw.get('execute', 0):
	#	raise ValueError, 'can only execute programs'
	def to_header(dct):
		# turn header_name into a block of #include directives
		if 'header_name' in dct:
			dct = Utils.to_list(dct['header_name'])
			return ''.join(['#include <%s>\n' % x for x in dct])
		return ''
	# set the file name
	if not 'compile_mode' in kw:
		kw['compile_mode'] = (kw['compiler'] == 'cxx') and 'cxx' or 'cc'
	if not 'compile_filename' in kw:
		kw['compile_filename'] = 'test.c' + ((kw['compile_mode'] == 'cxx') and 'pp' or '')
	#OSX
	if 'framework_name' in kw:
		try: TaskGen.task_gen.create_task_macapp
		except AttributeError: self.fatal('frameworks require the osx tool')
		fwkname = kw['framework_name']
		if not 'uselib_store' in kw:
			kw['uselib_store'] = fwkname.upper()
		if not kw.get('no_header', False):
			if not 'header_name' in kw:
				kw['header_name'] = []
			# conventionally Foo.framework exposes <Foo/Foo.h>
			fwk = '%s/%s.h' % (fwkname, fwkname)
			if kw.get('remove_dot_h', None):
				fwk = fwk[:-2]
			kw['header_name'] = Utils.to_list(kw['header_name']) + [fwk]
		kw['msg'] = 'Checking for framework %s' % fwkname
		kw['framework'] = fwkname
		#kw['frameworkpath'] = set it yourself
	if 'function_name' in kw:
		# check that a function exists (SNIP1 takes its address)
		fu = kw['function_name']
		if not 'msg' in kw:
			kw['msg'] = 'Checking for function %s' % fu
		kw['code'] = to_header(kw) + SNIP1 % fu
		if not 'uselib_store' in kw:
			kw['uselib_store'] = fu.upper()
		if not 'define_name' in kw:
			kw['define_name'] = self.have_define(fu)
	elif 'type_name' in kw:
		# check that a type exists (SNIP2 uses a pointer and sizeof)
		tu = kw['type_name']
		if not 'msg' in kw:
			kw['msg'] = 'Checking for type %s' % tu
		if not 'header_name' in kw:
			kw['header_name'] = 'stdint.h'
		kw['code'] = to_header(kw) + SNIP2 % {'type_name' : tu}
		if not 'define_name' in kw:
			kw['define_name'] = self.have_define(tu.upper())
	elif 'header_name' in kw:
		# check that the header(s) can be included
		if not 'msg' in kw:
			kw['msg'] = 'Checking for header %s' % kw['header_name']
		l = Utils.to_list(kw['header_name'])
		assert len(l)>0, 'list of headers in header_name is empty'
		kw['code'] = to_header(kw) + SNIP3
		if not 'uselib_store' in kw:
			kw['uselib_store'] = l[0].upper()
		if not 'define_name' in kw:
			kw['define_name'] = self.have_define(l[0])
	if 'lib' in kw:
		if not 'msg' in kw:
			kw['msg'] = 'Checking for library %s' % kw['lib']
		if not 'uselib_store' in kw:
			kw['uselib_store'] = kw['lib'].upper()
	if 'staticlib' in kw:
		if not 'msg' in kw:
			kw['msg'] = 'Checking for static library %s' % kw['staticlib']
		if not 'uselib_store' in kw:
			kw['uselib_store'] = kw['staticlib'].upper()
	if 'fragment' in kw:
		# an additional code fragment may be provided to replace the predefined code
		# in custom headers
		kw['code'] = kw['fragment']
		if not 'msg' in kw:
			kw['msg'] = 'Checking for custom code'
		if not 'errmsg' in kw:
			kw['errmsg'] = 'no'
	for (flagsname,flagstype) in [('cxxflags','compiler'), ('cflags','compiler'), ('linkflags','linker')]:
		if flagsname in kw:
			if not 'msg' in kw:
				kw['msg'] = 'Checking for %s flags %s' % (flagstype, kw[flagsname])
			if not 'errmsg' in kw:
				kw['errmsg'] = 'no'
	# final defaults
	if not 'execute' in kw:
		kw['execute'] = False
	if not 'errmsg' in kw:
		kw['errmsg'] = 'not found'
	if not 'okmsg' in kw:
		kw['okmsg'] = 'yes'
	if not 'code' in kw:
		kw['code'] = SNIP3
	if not kw.get('success'): kw['success'] = None
	assert 'msg' in kw, 'invalid parameters, read http://freehackers.org/~tnagy/wafbook/single.html#config_helpers_c'
@conf
def post_check(self, *k, **kw):
	"set the variables after a test was run successfully"
	# an executed test succeeds when run_c_code returned something (its output);
	# a compile-only test succeeds when the return code is 0
	is_success = False
	if kw['execute']:
		if kw['success'] is not None:
			is_success = True
	else:
		is_success = (kw['success'] == 0)
	if 'define_name' in kw:
		if 'header_name' in kw or 'function_name' in kw or 'type_name' in kw or 'fragment' in kw:
			if kw['execute']:
				key = kw['success']
				if isinstance(key, str):
					if key:
						# non-empty program output: define to that value
						self.define(kw['define_name'], key, quote=kw.get('quote', 1))
					else:
						# empty output still means the program ran fine
						self.define_cond(kw['define_name'], True)
				else:
					self.define_cond(kw['define_name'], False)
			else:
				self.define_cond(kw['define_name'], is_success)
	if is_success and 'uselib_store' in kw:
		import cc, cxx
		# propagate the kw flags (lib, includes, ...) into the *_<USELIB> variables
		for k in set(cc.g_cc_flag_vars).union(cxx.g_cxx_flag_vars):
			lk = k.lower()
			# inconsistency: includes -> CPPPATH
			if k == 'CPPPATH': lk = 'includes'
			if k == 'CXXDEFINES': lk = 'defines'
			if k == 'CCDEFINES': lk = 'defines'
			if lk in kw:
				val = kw[lk]
				# remove trailing slash
				if isinstance(val, str):
					val = val.rstrip(os.path.sep)
				self.env.append_unique(k + '_' + kw['uselib_store'], val)
@conf
def check(self, *k, **kw):
	# so this will be the generic function
	# it will be safer to use check_cxx or check_cc
	self.validate_c(kw)
	self.check_message_1(kw['msg'])
	ret = None
	try:
		ret = self.run_c_code(*k, **kw)
	except Configure.ConfigurationError, e:
		self.check_message_2(kw['errmsg'], 'YELLOW')
		if 'mandatory' in kw and kw['mandatory']:
			# verbose mode re-raises for a full traceback; otherwise abort cleanly
			if Logs.verbose > 1:
				raise
			else:
				self.fatal('the configuration failed (see %r)' % self.log.name)
	else:
		kw['success'] = ret
		self.check_message_2(self.ret_msg(kw['okmsg'], kw))
	# record defines/uselib variables derived from the result
	self.post_check(*k, **kw)
	if not kw.get('execute', False):
		# compile-only checks return a boolean (0 == compiled fine)
		return ret == 0
	return ret
@conf
def run_c_code(self, *k, **kw):
	# build (and optionally run) the configuration test described by kw
	# inside a throw-away sub-build directory
	test_f_name = kw['compile_filename']
	k = 0
	while k < 10000:
		# make certain to use a fresh folder - necessary for win32
		dir = os.path.join(self.blddir, '.conf_check_%d' % k)
		# if the folder already exists, remove it
		try:
			shutil.rmtree(dir)
		except OSError:
			pass
		try:
			os.stat(dir)
		except OSError:
			break
		k += 1
	try:
		os.makedirs(dir)
	except:
		self.fatal('cannot create a configuration test folder %r' % dir)
	try:
		os.stat(dir)
	except:
		self.fatal('cannot use the configuration test folder %r' % dir)
	bdir = os.path.join(dir, 'testbuild')
	if not os.path.exists(bdir):
		os.makedirs(bdir)
	env = kw['env']
	# write the test source file
	dest = open(os.path.join(dir, test_f_name), 'w')
	dest.write(kw['code'])
	dest.close()
	back = os.path.abspath('.')
	# set up a private build context for the test
	bld = Build.BuildContext()
	bld.log = self.log
	bld.all_envs.update(self.all_envs)
	bld.all_envs['default'] = env
	bld.lst_variants = bld.all_envs.keys()
	bld.load_dirs(dir, bdir)
	os.chdir(dir)
	bld.rescan(bld.srcnode)
	if not 'features' in kw:
		# conf.check(features='cc cprogram pyext', ...)
		kw['features'] = [kw['compile_mode'], kw['type']] # "cprogram cc"
	o = bld(features=kw['features'], source=test_f_name, target='testprog')
	# forward all the check parameters to the task generator
	for k, v in kw.iteritems():
		setattr(o, k, v)
	self.log.write("==>\n%s\n<==\n" % kw['code'])
	# compile the program
	try:
		bld.compile()
	except Utils.WafError:
		ret = Utils.ex_stack()
	else:
		ret = 0
	# chdir before returning
	os.chdir(back)
	if ret:
		self.log.write('command returned %r' % ret)
		self.fatal(str(ret))
	# if we need to run the program, try to get its result
	# keep the name of the program to execute
	if kw['execute']:
		lastprog = o.link_task.outputs[0].abspath(env)
		args = Utils.to_list(kw.get('exec_args', []))
		proc = Utils.pproc.Popen([lastprog] + args, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE)
		(out, err) = proc.communicate()
		w = self.log.write
		w(str(out))
		w('\n')
		w(str(err))
		w('\n')
		w('returncode %r' % proc.returncode)
		w('\n')
		if proc.returncode:
			self.fatal(Utils.ex_stack())
		# on success the return value is the program's stdout
		ret = out
	return ret
@conf
def check_cxx(self, *k, **kw):
	"""Shorthand for :func:`check` with the C++ compiler forced."""
	kw.update(compiler='cxx')
	return self.check(*k, **kw)
@conf
def check_cc(self, *k, **kw):
	"""Shorthand for :func:`check` with the C compiler forced."""
	kw.update(compiler='cc')
	return self.check(*k, **kw)
@conf
def define(self, define, value, quote=1):
	"""store a single define and its state into an internal list for later
	writing to a config header file. Value can only be
	a string or int; other types not supported. String
	values will appear properly quoted in the generated
	header file."""
	assert define and isinstance(define, str)
	# ordered_dict is for writing the configuration header in order
	tbl = self.env[DEFINES] or Utils.ordered_dict()
	# the user forgot to tell if the value is quoted or not
	if isinstance(value, str):
		if quote:
			# repr on '"'+value escapes backslashes etc.; the slice drops the
			# surrounding quotes, then embedded quotes are escaped for C
			tbl[define] = '"%s"' % repr('"'+value)[2:-1].replace('"', '\\"')
		else:
			tbl[define] = value
	elif isinstance(value, int):
		tbl[define] = value
	else:
		raise TypeError('define %r -> %r must be a string or an int' % (define, value))
	# add later to make reconfiguring faster
	self.env[DEFINES] = tbl
	self.env[define] = value # <- not certain this is necessary
@conf
def undefine(self, define):
	"""Record *define* as explicitly undefined; the config header will
	contain a commented-out #undef for it."""
	assert define and isinstance(define, str)
	tbl = self.env[DEFINES] or Utils.ordered_dict()
	tbl[define] = UNDEFINED
	# store back so reconfiguring stays fast
	self.env[DEFINES] = tbl
	self.env[define] = UNDEFINED
@conf
def define_cond(self, name, value):
	"""Conditionally define a name.
	Formally equivalent to: if value: define(name, 1) else: undefine(name)"""
	if not value:
		self.undefine(name)
		return
	self.define(name, 1)
@conf
def is_defined(self, key):
	"""True when *key* was defined to a real value (i.e. not UNDEFINED)."""
	defines = self.env[DEFINES]
	if not defines:
		return False
	try:
		return defines[key] != UNDEFINED
	except KeyError:
		return False
@conf
def get_define(self, define):
	"get the value of a previously stored define"
	tbl = self.env[DEFINES]
	try:
		return tbl[define]
	except KeyError:
		return None
@conf
def have_define(self, name):
	"prefix the define with 'HAVE_' and make sure it has valid characters."
	fmt = self.__dict__.get('HAVE_PAT', 'HAVE_%s')
	return fmt % Utils.quote_define_name(name)
@conf
def write_config_header(self, configfile='', env='', guard='', top=False):
	"save the defines into a file"
	if not configfile: configfile = WAF_CONFIG_H
	waf_guard = guard or '_%s_WAF' % Utils.quote_define_name(configfile)
	# configfile -> absolute path
	# there is a good reason to concatenate first and to split afterwards
	if not env: env = self.env
	if top:
		diff = ''
	else:
		# path of the current dir relative to the source root
		diff = Utils.diff_path(self.srcdir, self.curdir)
	full = os.sep.join([self.blddir, env.variant(), diff, configfile])
	full = os.path.normpath(full)
	(dir, base) = os.path.split(full)
	# best-effort: the directory may already exist
	try: os.makedirs(dir)
	except: pass
	dest = open(full, 'w')
	dest.write('/* Configuration header created by Waf - do not edit */\n')
	dest.write('#ifndef %s\n#define %s\n\n' % (waf_guard, waf_guard))
	dest.write(self.get_config_header())
	# config files are not removed on "waf clean"
	env.append_unique(CFG_FILES, os.path.join(diff, configfile))
	dest.write('\n#endif /* %s */\n' % waf_guard)
	dest.close()
@conf
def get_config_header(self):
	"""Fill-in the contents of the config header. Override when you need to write your own config header."""
	tbl = self.env[DEFINES] or Utils.ordered_dict()
	lines = []
	for key in tbl.allkeys:
		value = tbl[key]
		if value is None:
			lines.append('#define %s' % key)
		elif value is UNDEFINED:
			lines.append('/* #undef %s */' % key)
		else:
			lines.append('#define %s %s' % (key, value))
	return "\n".join(lines)
@conftest
def find_cpp(conf):
	"""Locate the C preprocessor: env['CPP'], then $CPP, then the 'cpp' program."""
	v = conf.env
	cpp = v['CPP'] or conf.environ.get('CPP', [])
	if not cpp:
		cpp = conf.find_program('cpp', var='CPP')
	v['CPP'] = cpp
@conftest
def cc_add_flags(conf):
	# import C compile flags from the OS environment into the build env
	conf.add_os_flags('CFLAGS', 'CCFLAGS')
	conf.add_os_flags('CPPFLAGS')
@conftest
def cxx_add_flags(conf):
	# import C++ compile flags from the OS environment into the build env
	conf.add_os_flags('CXXFLAGS')
	conf.add_os_flags('CPPFLAGS')
@conftest
def link_add_flags(conf):
	# import link flags from the OS environment ($LDFLAGS feeds LINKFLAGS)
	conf.add_os_flags('LINKFLAGS')
	conf.add_os_flags('LDFLAGS', 'LINKFLAGS')
@conftest
def cc_load_tools(conf):
	# load the generic C tool
	conf.check_tool('cc')
@conftest
def cxx_load_tools(conf):
	# load the generic C++ tool
	conf.check_tool('cxx')
ntdb-1.0/buildtools/wafadmin/Tools/cs.py 0000664 0000000 0000000 00000003413 12241515307 0020327 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
"C# support"
import TaskGen, Utils, Task, Options
from Logs import error
from TaskGen import before, after, taskgen, feature
# environment variables propagated per uselib entry
flag_vars= ['FLAGS', 'ASSEMBLIES']

@feature('cs')
def init_cs(self):
	# give every 'cs' task generator its default (empty) attributes
	Utils.def_attrs(self,
		flags = '',
		assemblies = '',
		resources = '',
		uselib = '')
@feature('cs')
@after('init_cs')
def apply_uselib_cs(self):
	# merge the per-uselib flag variables into the task generator env
	if not self.uselib:
		return
	global flag_vars
	for var in self.to_list(self.uselib):
		# NOTE(review): the 'global flag_vars' declaration suggests the loop
		# below was meant to iterate the module-level flag_vars, but it reads
		# self.flag_vars instead - confirm which attribute actually exists
		for v in self.flag_vars:
			val = self.env[v+'_'+var]
			if val: self.env.append_value(v, val)
@feature('cs')
@after('apply_uselib_cs')
@before('apply_core')
def apply_cs(self):
	# the 'cs' feature replaces the default source processing entirely
	try: self.meths.remove('apply_core')
	except ValueError: pass
	# process the flags for the assemblies
	for i in self.to_list(self.assemblies) + self.env['ASSEMBLIES']:
		self.env.append_unique('_ASSEMBLIES', '/r:'+i)
	# process the flags for the resources
	for i in self.to_list(self.resources):
		self.env.append_unique('_RESOURCES', '/resource:'+i)
	# what kind of assembly are we generating?
	self.env['_TYPE'] = getattr(self, 'type', 'exe')
	# additional flags
	self.env.append_unique('_FLAGS', self.to_list(self.flags))
	self.env.append_unique('_FLAGS', self.env.FLAGS)
	# process the sources
	nodes = [self.path.find_resource(i) for i in self.to_list(self.source)]
	self.create_task('mcs', nodes, self.path.find_or_declare(self.target))

# the command line executed by the 'mcs' task created above
Task.simple_task_type('mcs', '${MCS} ${SRC} /target:${_TYPE} /out:${TGT} ${_FLAGS} ${_ASSEMBLIES} ${_RESOURCES}', color='YELLOW')
def detect(conf):
	# honour --with-csc-binary when given, else search for gmcs/mcs
	csc = getattr(Options.options, 'cscbinary', None)
	if csc:
		conf.env.MCS = csc
	# NOTE(review): find_program is called unconditionally; confirm it keeps a
	# pre-set MCS value instead of overwriting the user's explicit choice
	conf.find_program(['gmcs', 'mcs'], var='MCS')
def set_options(opt):
	# let the user point at a specific C# compiler binary
	opt.add_option('--with-csc-binary', type='string', dest='cscbinary')
ntdb-1.0/buildtools/wafadmin/Tools/cxx.py 0000664 0000000 0000000 00000006070 12241515307 0020526 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
"Base for c++ programs and libraries"
import TaskGen, Task, Utils
from Logs import debug
import ccroot # <- do not remove
from TaskGen import feature, before, extension, after
# environment variables propagated per uselib entry for C++ builds
g_cxx_flag_vars = [
	'CXXDEPS', 'FRAMEWORK', 'FRAMEWORKPATH',
	'STATICLIB', 'LIB', 'LIBPATH', 'LINKFLAGS', 'RPATH',
	'CXXFLAGS', 'CCFLAGS', 'CPPPATH', 'CPPFLAGS', 'CXXDEFINES']
"main cpp variables"

# file extensions handled by the cxx hook below
EXT_CXX = ['.cpp', '.cc', '.cxx', '.C', '.c++']
g_cxx_type_vars=['CXXFLAGS', 'LINKFLAGS']

# TODO remove in waf 1.6
class cxx_taskgen(ccroot.ccroot_abstract):
	pass
@feature('cxx')
@before('apply_type_vars')
@after('default_cc')
def init_cxx(self):
	# compile plain .c sources with the C++ compiler unless 'cc' is also active
	if not 'cc' in self.features:
		self.mappings['.c'] = TaskGen.task_gen.mappings['.cxx']
	# extend the propagated variable sets with the C++-specific ones
	self.p_flag_vars = set(self.p_flag_vars).union(g_cxx_flag_vars)
	self.p_type_vars = set(self.p_type_vars).union(g_cxx_type_vars)
	if not self.env['CXX_NAME']:
		raise Utils.WafError("At least one compiler (g++, ..) must be selected")
@feature('cxx')
@after('apply_incpaths')
def apply_obj_vars_cxx(self):
	"""after apply_incpaths for INC_PATHS"""
	env = self.env
	app = env.append_unique
	cxxpath_st = env['CPPPATH_ST']
	# local flags come first
	# set the user-defined includes paths
	for i in env['INC_PATHS']:
		# both the build and the source variant of each include node
		app('_CXXINCFLAGS', cxxpath_st % i.bldpath(env))
		app('_CXXINCFLAGS', cxxpath_st % i.srcpath(env))
	# set the library include paths
	for i in env['CPPPATH']:
		app('_CXXINCFLAGS', cxxpath_st % i)
@feature('cxx')
@after('apply_lib_vars')
def apply_defines_cxx(self):
	"""after uselib is set for CXXDEFINES"""
	self.defines = getattr(self, 'defines', [])
	lst = self.to_list(self.defines) + self.to_list(self.env['CXXDEFINES'])
	milst = []
	# now process the local defines
	for defi in lst:
		# keep first occurrence only
		if not defi in milst:
			milst.append(defi)
	# CXXDEFINES_USELIB
	libs = self.to_list(self.uselib)
	for l in libs:
		val = self.env['CXXDEFINES_'+l]
		if val: milst += self.to_list(val)
	# DEFLINES: "NAME value" pairs, quotes trimmed from the value
	self.env['DEFLINES'] = ["%s %s" % (x[0], Utils.trimquotes('='.join(x[1:]))) for x in [y.split('=') for y in milst]]
	y = self.env['CXXDEFINES_ST']
	self.env.append_unique('_CXXDEFFLAGS', [y%x for x in milst])
@extension(EXT_CXX)
def cxx_hook(self, node):
	# create the compilation task: cpp or cc
	if getattr(self, 'obj_ext', None):
		obj_ext = self.obj_ext
	else:
		# the task generator index keeps object names unique
		obj_ext = '_%d.o' % self.idx
	task = self.create_task('cxx', node, node.change_ext(obj_ext))
	try:
		self.compiled_tasks.append(task)
	except AttributeError:
		raise Utils.WafError('Have you forgotten to set the feature "cxx" on %s?' % str(self))
	return task
# command lines for the compile and link tasks
cxx_str = '${CXX} ${CXXFLAGS} ${CPPFLAGS} ${_CXXINCFLAGS} ${_CXXDEFFLAGS} ${CXX_SRC_F}${SRC} ${CXX_TGT_F}${TGT}'
cls = Task.simple_task_type('cxx', cxx_str, color='GREEN', ext_out='.o', ext_in='.cxx', shell=False)
cls.scan = ccroot.scan
cls.vars.append('CXXDEPS')

link_str = '${LINK_CXX} ${CXXLNK_SRC_F}${SRC} ${CXXLNK_TGT_F}${TGT[0].abspath(env)} ${LINKFLAGS}'
cls = Task.simple_task_type('cxx_link', link_str, color='YELLOW', ext_in='.o', ext_out='.bin', shell=False)
# linking is serialized and not installed by default
cls.maxjobs = 1
cls.install = Utils.nada
ntdb-1.0/buildtools/wafadmin/Tools/d.py 0000664 0000000 0000000 00000034234 12241515307 0020152 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2007 (dv)
# Thomas Nagy, 2007-2008 (ita)
import os, sys, re, optparse
import ccroot # <- leave this
import TaskGen, Utils, Task, Configure, Logs, Build
from Logs import debug, error
from TaskGen import taskgen, feature, after, before, extension
from Configure import conftest
# D source extensions
EXT_D = ['.d', '.di', '.D']
D_METHS = ['apply_core', 'apply_vnum', 'apply_objdeps'] # additional d methods
# D snippet used to identify the runtime library (phobos1/phobos2/tango)
DLIB = """
version(D_Version2) {
import std.stdio;
int main() {
writefln("phobos2");
return 0;
}
} else {
version(Tango) {
import tango.stdc.stdio;
int main() {
printf("tango");
return 0;
}
} else {
import std.stdio;
int main() {
writefln("phobos1");
return 0;
}
}
}
"""
def filter_comments(filename):
	# hand-written scanner: return the pieces of the file outside comments,
	# so the import parser does not see identifiers inside comments/strings
	txt = Utils.readf(filename)
	i = 0
	buf = []
	max = len(txt)
	begin = 0
	while i < max:
		c = txt[i]
		if c == '"' or c == "'": # skip a string or character literal
			buf.append(txt[begin:i])
			delim = c
			i += 1
			while i < max:
				c = txt[i]
				if c == delim: break
				elif c == '\\': # skip the character following backslash
					i += 1
				i += 1
			i += 1
			begin = i
		elif c == '/': # try to replace a comment with whitespace
			buf.append(txt[begin:i])
			i += 1
			if i == max: break
			c = txt[i]
			if c == '+': # eat nesting /+ +/ comment
				i += 1
				nesting = 1
				c = None
				while i < max:
					prev = c
					c = txt[i]
					if prev == '/' and c == '+':
						nesting += 1
						c = None
					elif prev == '+' and c == '/':
						nesting -= 1
						if nesting == 0: break
						c = None
					i += 1
			elif c == '*': # eat /* */ comment
				i += 1
				c = None
				while i < max:
					prev = c
					c = txt[i]
					if prev == '*' and c == '/': break
					i += 1
			elif c == '/': # eat // comment
				i += 1
				while i < max and txt[i] != '\n':
					i += 1
			else: # no comment
				begin = i - 1
				continue
			i += 1
			begin = i
			buf.append(' ')
		else:
			i += 1
	# flush the trailing piece
	buf.append(txt[begin:])
	return buf
class d_parser(object):
	# scans D sources for "import" statements and resolves them against
	# the include paths, collecting found nodes and unresolved names
	def __init__(self, env, incpaths):
		#self.code = ''
		#self.module = ''
		#self.imports = []
		self.allnames = []
		self.re_module = re.compile("module\s+([^;]+)")
		self.re_import = re.compile("import\s+([^;]+)")
		self.re_import_bindings = re.compile("([^:]+):(.*)")
		self.re_import_alias = re.compile("[^=]+=(.+)")
		self.env = env
		self.nodes = []
		self.names = []
		self.incpaths = incpaths
	def tryfind(self, filename):
		# resolve a module name (dots -> slashes) to a .d file on the include paths
		found = 0
		for n in self.incpaths:
			found = n.find_resource(filename.replace('.', '/') + '.d')
			if found:
				self.nodes.append(found)
				self.waiting.append(found)
				break
		if not found:
			# unresolved: remember the bare name
			if not filename in self.names:
				self.names.append(filename)
	def get_strings(self, code):
		# extract the imported module names from a (comment-free) source string
		#self.imports = []
		self.module = ''
		lst = []
		# get the module name (if present)
		mod_name = self.re_module.search(code)
		if mod_name:
			self.module = re.sub('\s+', '', mod_name.group(1)) # strip all whitespaces
		# go through the code, have a look at all import occurrences
		# first, lets look at anything beginning with "import" and ending with ";"
		import_iterator = self.re_import.finditer(code)
		if import_iterator:
			for import_match in import_iterator:
				import_match_str = re.sub('\s+', '', import_match.group(1)) # strip all whitespaces
				# does this end with an import bindings declaration?
				# (import bindings always terminate the list of imports)
				bindings_match = self.re_import_bindings.match(import_match_str)
				if bindings_match:
					import_match_str = bindings_match.group(1)
					# if so, extract the part before the ":" (since the module declaration(s) is/are located there)
				# split the matching string into a bunch of strings, separated by a comma
				matches = import_match_str.split(',')
				for match in matches:
					alias_match = self.re_import_alias.match(match)
					if alias_match:
						# is this an alias declaration? (alias = module name) if so, extract the module name
						match = alias_match.group(1)
					lst.append(match)
		return lst
	def start(self, node):
		# breadth-first traversal starting from the given source node
		self.waiting = [node]
		# while the stack is not empty, add the dependencies
		while self.waiting:
			nd = self.waiting.pop(0)
			self.iter(nd)
	def iter(self, node):
		# scan one node and queue its resolvable imports
		path = node.abspath(self.env) # obtain the absolute path
		code = "".join(filter_comments(path)) # read the file and filter the comments
		names = self.get_strings(code) # obtain the import strings
		for x in names:
			# optimization
			if x in self.allnames: continue
			self.allnames.append(x)
			# for each name, see if it is like a node or not
			self.tryfind(x)
def scan(self):
	"look for .d/.di the .d source need"
	# used as the scanner method of the D compile task (self is the task)
	env = self.env
	gruik = d_parser(env, env['INC_PATHS'])
	gruik.start(self.inputs[0])
	if Logs.verbose:
		debug('deps: nodes found for %s: %s %s' % (str(self.inputs[0]), str(gruik.nodes), str(gruik.names)))
		#debug("deps found for %s: %s" % (str(node), str(gruik.deps)), 'deps')
	return (gruik.nodes, gruik.names)
def get_target_name(self):
    "for d programs and libs"
    env = self.env
    kind = 'program'
    # a shared/static lib feature overrides the default 'program' kind
    for feat in self.features:
        if feat in ('dshlib', 'dstaticlib'):
            kind = feat.lstrip('d')
    return env['D_%s_PATTERN' % kind] % self.target
# default attribute values applied to 'd' task generators by init_d() below
d_params = {
    'dflags': '',
    'importpaths':'',
    'libs':'',
    'libpaths':'',
    'generate_headers':False,
}
@feature('d')
@before('apply_type_vars')
def init_d(self):
    """Ensure every attribute listed in d_params exists on the task generator."""
    for attr in d_params:
        # keep any value already set by the user, else install the default
        setattr(self, attr, getattr(self, attr, d_params[attr]))
class d_taskgen(TaskGen.task_gen):
    # task generator for D sources; the 'd' feature methods do the real work
    def __init__(self, *k, **kw):
        TaskGen.task_gen.__init__(self, *k, **kw)
        # COMPAT: old-style positional argument ('shlib', ...) becomes a feature
        if len(k) > 1:
            self.features.append('d' + k[1])
# okay, we borrow a few methods from ccroot
# (D_METHS is defined earlier in this module, outside this excerpt)
TaskGen.bind_feature('d', D_METHS)
@feature('d')
@before('apply_d_libs')
def init_d(self):
    # set the default attributes used by the other 'd' feature methods
    Utils.def_attrs(self,
        dflags='',
        importpaths='',
        libs='',
        libpaths='',
        uselib='',
        uselib_local='',
        generate_headers=False, # set to true if you want .di files as well as .o
        compiled_tasks=[],
        add_objects=[],
        link_task=None)
@feature('d')
@after('apply_d_link', 'init_d')
@before('apply_vnum', 'apply_d_vars')
def apply_d_libs(self):
    """after apply_link because of 'link_task'
    after default_cc because of the attribute 'uselib'"""
    env = self.env
    # 1. the case of the libs defined in the project (visit ancestors first)
    # the ancestors external libraries (uselib) will be prepended
    self.uselib = self.to_list(self.uselib)
    names = self.to_list(self.uselib_local)
    seen = set([])
    tmp = Utils.deque(names) # consume a copy of the list of names
    while tmp:
        lib_name = tmp.popleft()
        # visit dependencies only once
        if lib_name in seen:
            continue
        y = self.name_to_obj(lib_name)
        if not y:
            raise Utils.WafError('object %r was not found in uselib_local (required by %r)' % (lib_name, self.name))
        y.post()
        seen.add(lib_name)
        # object has ancestors to process (shared libraries): add them to the end of the list
        if getattr(y, 'uselib_local', None):
            lst = y.to_list(y.uselib_local)
            # static libs linked into a shared lib/program are not propagated
            if 'dshlib' in y.features or 'dprogram' in y.features:
                lst = [x for x in lst if not 'dstaticlib' in self.name_to_obj(x).features]
            tmp.extend(lst)
        # link task and flags
        if getattr(y, 'link_task', None):
            link_name = y.target[y.target.rfind(os.sep) + 1:]
            if 'dstaticlib' in y.features or 'dshlib' in y.features:
                env.append_unique('DLINKFLAGS', env.DLIB_ST % link_name)
                env.append_unique('DLINKFLAGS', env.DLIBPATH_ST % y.link_task.outputs[0].parent.bldpath(env))
            # the order
            self.link_task.set_run_after(y.link_task)
            # for the recompilation
            dep_nodes = getattr(self.link_task, 'dep_nodes', [])
            self.link_task.dep_nodes = dep_nodes + y.link_task.outputs
        # add ancestors uselib too - but only propagate those that have no staticlib
        for v in self.to_list(y.uselib):
            if not v in self.uselib:
                self.uselib.insert(0, v)
        # if the library task generator provides 'export_incdirs', add to the include path
        # the export_incdirs must be a list of paths relative to the other library
        if getattr(y, 'export_incdirs', None):
            for x in self.to_list(y.export_incdirs):
                node = y.path.find_dir(x)
                if not node:
                    raise Utils.WafError('object %r: invalid folder %r in export_incdirs' % (y.target, x))
                self.env.append_unique('INC_PATHS', node)
@feature('dprogram', 'dshlib', 'dstaticlib')
@after('apply_core')
def apply_d_link(self):
    """Create the link task once all compile tasks have been set up."""
    link = getattr(self, 'link', None)
    if not link:
        # static archives go through ar; everything else through the D linker
        link = 'static_link' if 'dstaticlib' in self.features else 'd_link'
    objects = [tsk.outputs[0] for tsk in self.compiled_tasks]
    self.link_task = self.create_task(link, objects, self.path.find_or_declare(get_target_name(self)))
@feature('d')
@after('apply_core')
def apply_d_vars(self):
    """Build DFLAGS, _DIMPORTFLAGS and DLINKFLAGS from the uselib variables,
    the import paths, the library paths and the libraries of the task generator."""
    env = self.env
    dpath_st = env['DPATH_ST']
    lib_st = env['DLIB_ST']
    libpath_st = env['DLIBPATH_ST']
    importpaths = self.to_list(self.importpaths)
    libpaths = []
    libs = []
    uselib = self.to_list(self.uselib)
    # per-uselib compiler flags (e.g. DFLAGS_DEBUG)
    for i in uselib:
        if env['DFLAGS_' + i]:
            env.append_unique('DFLAGS', env['DFLAGS_' + i])
    for x in self.features:
        if not x in ['dprogram', 'dstaticlib', 'dshlib']:
            continue
        # BUGFIX: the lstrip() result was previously discarded, so the lookup
        # used 'D_dshlib_DFLAGS' instead of 'D_shlib_DFLAGS' and the per-kind
        # flags (e.g. -fPIC for shared libs) were never applied.
        x = x.lstrip('d')
        d_shlib_dflags = env['D_' + x + '_DFLAGS']
        if d_shlib_dflags:
            env.append_unique('DFLAGS', d_shlib_dflags)
    # add import paths
    for i in uselib:
        if env['DPATH_' + i]:
            for entry in self.to_list(env['DPATH_' + i]):
                if not entry in importpaths:
                    importpaths.append(entry)
    # now process the import paths
    for path in importpaths:
        if os.path.isabs(path):
            env.append_unique('_DIMPORTFLAGS', dpath_st % path)
        else:
            # relative paths are resolved against both src and bld trees
            node = self.path.find_dir(path)
            self.env.append_unique('INC_PATHS', node)
            env.append_unique('_DIMPORTFLAGS', dpath_st % node.srcpath(env))
            env.append_unique('_DIMPORTFLAGS', dpath_st % node.bldpath(env))
    # add library paths
    for i in uselib:
        if env['LIBPATH_' + i]:
            for entry in self.to_list(env['LIBPATH_' + i]):
                if not entry in libpaths:
                    libpaths.append(entry)
    libpaths = self.to_list(self.libpaths) + libpaths
    # now process the library paths
    # apply same path manipulation as used with import paths
    for path in libpaths:
        if not os.path.isabs(path):
            node = self.path.find_resource(path)
            if not node:
                raise Utils.WafError('could not find libpath %r from %r' % (path, self))
            path = node.abspath(self.env)
        env.append_unique('DLINKFLAGS', libpath_st % path)
    # add libraries
    for i in uselib:
        if env['LIB_' + i]:
            for entry in self.to_list(env['LIB_' + i]):
                if not entry in libs:
                    libs.append(entry)
    libs.extend(self.to_list(self.libs))
    # process user flags
    for flag in self.to_list(self.dflags):
        env.append_unique('DFLAGS', flag)
    # now process the libraries
    for lib in libs:
        env.append_unique('DLINKFLAGS', lib_st % lib)
    # add linker flags
    for i in uselib:
        dlinkflags = env['DLINKFLAGS_' + i]
        if dlinkflags:
            for linkflag in dlinkflags:
                env.append_unique('DLINKFLAGS', linkflag)
@feature('dshlib')
@after('apply_d_vars')
def add_shlib_d_flags(self):
    """Append the platform shared-library link flags to DLINKFLAGS."""
    env = self.env
    for flag in env['D_shlib_LINKFLAGS']:
        env.append_unique('DLINKFLAGS', flag)
@extension(EXT_D)
def d_hook(self, node):
    """Create a compile task for one .d source, optionally emitting a .di header."""
    task_name = 'd_with_header' if self.generate_headers else 'd'
    task = self.create_task(task_name)
    try:
        obj_ext = self.obj_ext
    except AttributeError:
        obj_ext = '_%d.o' % self.idx
    task.inputs = [node]
    task.outputs = [node.change_ext(obj_ext)]
    self.compiled_tasks.append(task)
    if self.generate_headers:
        # the interface file is produced alongside the object file
        header_node = node.change_ext(self.env['DHEADER_ext'])
        task.outputs.append(header_node)
# command-line templates for the d / d_with_header / d_link tasks below
d_str = '${D_COMPILER} ${DFLAGS} ${_DIMPORTFLAGS} ${D_SRC_F}${SRC} ${D_TGT_F}${TGT}'
d_with_header_str = '${D_COMPILER} ${DFLAGS} ${_DIMPORTFLAGS} \
${D_HDR_F}${TGT[1].bldpath(env)} \
${D_SRC_F}${SRC} \
${D_TGT_F}${TGT[0].bldpath(env)}'
link_str = '${D_LINKER} ${DLNK_SRC_F}${SRC} ${DLNK_TGT_F}${TGT} ${DLINKFLAGS}'
def override_exec(cls):
    """stupid dmd wants -of stuck to the file name

    Wrap cls.exec_command so that a standalone '-of' argument is fused with
    the argument that follows it: ['-of', 'x.o'] becomes ['-ofx.o'].
    Non-list commands are passed through unchanged.
    """
    old_exec = cls.exec_command
    def exec_command(self, *k, **kw):
        if isinstance(k[0], list):
            lst = k[0]
            # FIX: use range() instead of the Python2-only xrange(); the loop
            # semantics are identical and the hook stays portable
            for i in range(len(lst)):
                if lst[i] == '-of':
                    del lst[i]
                    lst[i] = '-of' + lst[i]
                    break
        return old_exec(self, *k, **kw)
    cls.exec_command = exec_command
# register the D task classes; each gets the '-of' fix-up from override_exec()
cls = Task.simple_task_type('d', d_str, 'GREEN', before='static_link d_link', shell=False)
cls.scan = scan
override_exec(cls)
cls = Task.simple_task_type('d_with_header', d_with_header_str, 'GREEN', before='static_link d_link', shell=False)
override_exec(cls)
cls = Task.simple_task_type('d_link', link_str, color='YELLOW', shell=False)
override_exec(cls)
# for feature request #104
@taskgen
def generate_header(self, filename, install_path):
    """Queue *filename* for .di interface generation (feature request #104)."""
    if not hasattr(self, 'header_lst'):
        self.header_lst = []
    # schedule the method that turns the queue into tasks
    self.meths.append('process_header')
    self.header_lst.append([filename, install_path])
@before('apply_core')
def process_header(self):
    """Create a d_header task for every file queued by generate_header()."""
    for entry in getattr(self, 'header_lst', []):
        filename = entry[0]
        node = self.path.find_resource(filename)
        if not node:
            raise Utils.WafError('file not found on d obj ' + filename)
        tsk = self.create_task('d_header')
        tsk.set_inputs(node)
        tsk.set_outputs(node.change_ext('.di'))
# command line and task class for generating .di interface files
d_header_str = '${D_COMPILER} ${D_HEADER} ${SRC}'
Task.simple_task_type('d_header', d_header_str, color='BLUE', shell=False)
@conftest
def d_platform_flags(conf):
    """Choose the D binary file-name patterns from the target binary format."""
    v = conf.env
    binfmt = v.DEST_BINFMT or Utils.unversioned_sys_platform_to_binary_format(
        v.DEST_OS or Utils.unversioned_sys_platform())
    # (program, shared lib, static lib) patterns; PE targets differ, all
    # other formats get the ELF-style defaults
    if binfmt == 'pe':
        prog, shlib, stlib = '%s.exe', 'lib%s.dll', 'lib%s.a'
    else:
        prog, shlib, stlib = '%s', 'lib%s.so', 'lib%s.a'
    v['D_program_PATTERN'] = prog
    v['D_shlib_PATTERN'] = shlib
    v['D_staticlib_PATTERN'] = stlib
@conftest
def check_dlibrary(conf):
    # compile and run a small D program (the DLIB fragment, defined elsewhere
    # in this module) and record its output as the runtime library flavour
    ret = conf.check_cc(features='d dprogram', fragment=DLIB, mandatory=True, compile_filename='test.d', execute=True)
    conf.env.DLIBRARY = ret.strip()
# quick test #
if __name__ == "__main__":
#Logs.verbose = 2
try: arg = sys.argv[1]
except IndexError: arg = "file.d"
print("".join(filter_comments(arg)))
# TODO
paths = ['.']
#gruik = filter()
#gruik.start(arg)
#code = "".join(gruik.buf)
#print "we have found the following code"
#print code
#print "now parsing"
#print "-------------------------------------------"
"""
parser_ = d_parser()
parser_.start(arg)
print "module: %s" % parser_.module
print "imports: ",
for imp in parser_.imports:
print imp + " ",
print
"""
ntdb-1.0/buildtools/wafadmin/Tools/dbus.py 0000664 0000000 0000000 00000001760 12241515307 0020662 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Ali Sabil, 2007
import Task, Utils
from TaskGen import taskgen, before, after, feature
@taskgen
def add_dbus_file(self, filename, prefix, mode):
    """Queue a D-Bus interface file for processing by dbus-binding-tool."""
    if not hasattr(self, 'dbus_lst'):
        self.dbus_lst = []
    # (re-)schedule the method that turns the queue into tasks
    self.meths.append('process_dbus')
    self.dbus_lst.append([filename, prefix, mode])
@before('apply_core')
def process_dbus(self):
    """Create a dbus_binding_tool task per file queued by add_dbus_file()."""
    for filename, prefix, mode in getattr(self, 'dbus_lst', []):
        node = self.path.find_resource(filename)
        if not node:
            raise Utils.WafError('file not found ' + filename)
        # the generated header sits next to the xml description
        tsk = self.create_task('dbus_binding_tool', node, node.change_ext('.h'))
        tsk.env.DBUS_BINDING_TOOL_PREFIX = prefix
        tsk.env.DBUS_BINDING_TOOL_MODE = mode
# task class invoking dbus-binding-tool; must run before the C compilation
Task.simple_task_type('dbus_binding_tool',
    '${DBUS_BINDING_TOOL} --prefix=${DBUS_BINDING_TOOL_PREFIX} --mode=${DBUS_BINDING_TOOL_MODE} --output=${TGT} ${SRC}',
    color='BLUE', before='cc')
def detect(conf):
    # locate dbus-binding-tool and store it as DBUS_BINDING_TOOL
    dbus_binding_tool = conf.find_program('dbus-binding-tool', var='DBUS_BINDING_TOOL')
ntdb-1.0/buildtools/wafadmin/Tools/dmd.py 0000664 0000000 0000000 00000003055 12241515307 0020470 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2007 (dv)
# Thomas Nagy, 2008 (ita)
import sys
import Utils, ar
from Configure import conftest
@conftest
def find_dmd(conf):
    # find the dmd (or ldc) compiler and store it as D_COMPILER
    conf.find_program(['dmd', 'ldc'], var='D_COMPILER', mandatory=True)
@conftest
def common_flags_ldc(conf):
    # overrides applied on top of common_flags_dmd when the compiler is ldc
    v = conf.env
    v['DFLAGS'] = ['-d-version=Posix']
    v['DLINKFLAGS'] = []
    v['D_shlib_DFLAGS'] = ['-relocation-model=pic']
@conftest
def common_flags_dmd(conf):
    # baseline flag templates for the Digital Mars D compiler
    v = conf.env
    # _DFLAGS _DIMPORTFLAGS
    # Compiler is dmd so 'gdc' part will be ignored, just
    # ensure key is there, so wscript can append flags to it
    v['DFLAGS'] = ['-version=Posix']
    v['D_SRC_F'] = ''
    v['D_TGT_F'] = ['-c', '-of']
    v['DPATH_ST'] = '-I%s' # template for adding import paths
    # linker
    v['D_LINKER'] = v['D_COMPILER']
    v['DLNK_SRC_F'] = ''
    v['DLNK_TGT_F'] = '-of'
    v['DLIB_ST'] = '-L-l%s' # template for adding libs
    v['DLIBPATH_ST'] = '-L-L%s' # template for adding libpaths
    # linker debug levels
    v['DFLAGS_OPTIMIZED'] = ['-O']
    v['DFLAGS_DEBUG'] = ['-g', '-debug']
    v['DFLAGS_ULTRADEBUG'] = ['-g', '-debug']
    v['DLINKFLAGS'] = ['-quiet']
    v['D_shlib_DFLAGS'] = ['-fPIC']
    v['D_shlib_LINKFLAGS'] = ['-L-shared']
    v['DHEADER_ext'] = '.di'
    v['D_HDR_F'] = ['-H', '-Hf']
def detect(conf):
    # configure the dmd/ldc tool chain (order matters: flags before platform)
    conf.find_dmd()
    conf.check_tool('ar')
    conf.check_tool('d')
    conf.common_flags_dmd()
    conf.d_platform_flags()
    # ldc accepts most dmd flags but needs a few overrides
    if conf.env.D_COMPILER.find('ldc') > -1:
        conf.common_flags_ldc()
ntdb-1.0/buildtools/wafadmin/Tools/flex.py 0000664 0000000 0000000 00000000727 12241515307 0020665 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# John O'Meara, 2006
# Thomas Nagy, 2006-2008
"Flex processing"
import TaskGen
def decide_ext(self, node):
    """Pick the flex output extension: .lex.cc for C++ builds, .lex.c otherwise."""
    return '.lex.cc' if 'cxx' in self.features else '.lex.c'
# .l files are turned into C or C++ sources by flex; decide_ext picks the ext
TaskGen.declare_chain(
    name = 'flex',
    rule = '${FLEX} -o${TGT} ${FLEXFLAGS} ${SRC}',
    ext_in = '.l',
    ext_out = '.c .cxx',
    decider = decide_ext
)
def detect(conf):
    # locate flex and initialize FLEXFLAGS to an empty string
    conf.find_program('flex', var='FLEX', mandatory=True)
    conf.env['FLEXFLAGS'] = ''
ntdb-1.0/buildtools/wafadmin/Tools/gas.py 0000664 0000000 0000000 00000002125 12241515307 0020473 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2008 (ita)
"as and gas"
import os, sys
import Task
from TaskGen import extension, taskgen, after, before
# source extensions handled by the assembler hook below
EXT_ASM = ['.s', '.S', '.asm', '.ASM', '.spp', '.SPP']
as_str = '${AS} ${ASFLAGS} ${_ASINCFLAGS} ${SRC} -o ${TGT}'
Task.simple_task_type('asm', as_str, 'PINK', ext_out='.o', shell=False)
@extension(EXT_ASM)
def asm_hook(self, node):
    """Create an assembler task for *node* and register its object file."""
    try:
        obj_ext = self.obj_ext
    except AttributeError:
        obj_ext = '_%d.o' % self.idx
    tsk = self.create_task('asm', node, node.change_ext(obj_ext))
    self.compiled_tasks.append(tsk)
    # schedule the include-flag propagation method
    self.meths.append('asm_incflags')
@after('apply_obj_vars_cc')
@after('apply_obj_vars_cxx')
@before('apply_link')
def asm_incflags(self):
    """Reuse the C or C++ include flags for the assembler."""
    env = self.env
    env.append_value('_ASINCFLAGS', env.ASINCFLAGS)
    if 'cxx' in self.features:
        env.append_value('_ASINCFLAGS', env['_CXXINCFLAGS'])
    else:
        env.append_value('_ASINCFLAGS', env['_CCINCFLAGS'])
def detect(conf):
    # prefer gas, fall back to as, and finally to the C compiler driver
    conf.find_program(['gas', 'as'], var='AS')
    if not conf.env.AS: conf.env.AS = conf.env.CC
    #conf.env.ASFLAGS = ['-c'] <- may be necesary for .S files
ntdb-1.0/buildtools/wafadmin/Tools/gcc.py 0000664 0000000 0000000 00000007505 12241515307 0020464 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2008 (ita)
# Ralf Habacker, 2006 (rh)
# Yinon Ehrlich, 2009
import os, sys
import Configure, Options, Utils
import ccroot, ar
from Configure import conftest
@conftest
def find_gcc(conf):
    # locate gcc (or cc), detect its version, and record it as the C compiler
    cc = conf.find_program(['gcc', 'cc'], var='CC', mandatory=True)
    cc = conf.cmd_to_list(cc)
    ccroot.get_cc_version(conf, cc, gcc=True)
    conf.env.CC_NAME = 'gcc'
    conf.env.CC = cc
@conftest
def gcc_common_flags(conf):
    # baseline flag templates shared by all gcc targets; platform modifiers
    # (gcc_modifier_*) adjust these afterwards
    v = conf.env
    # CPPFLAGS CCDEFINES _CCINCFLAGS _CCDEFFLAGS
    v['CCFLAGS_DEBUG'] = ['-g']
    v['CCFLAGS_RELEASE'] = ['-O2']
    v['CC_SRC_F'] = ''
    v['CC_TGT_F'] = ['-c', '-o', ''] # shell hack for -MD
    v['CPPPATH_ST'] = '-I%s' # template for adding include paths
    # linker
    if not v['LINK_CC']: v['LINK_CC'] = v['CC']
    v['CCLNK_SRC_F'] = ''
    v['CCLNK_TGT_F'] = ['-o', ''] # shell hack for -MD
    v['LIB_ST'] = '-l%s' # template for adding libs
    v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
    v['STATICLIB_ST'] = '-l%s'
    v['STATICLIBPATH_ST'] = '-L%s'
    v['RPATH_ST'] = '-Wl,-rpath,%s'
    v['CCDEFINES_ST'] = '-D%s'
    v['SONAME_ST'] = '-Wl,-h,%s'
    v['SHLIB_MARKER'] = '-Wl,-Bdynamic'
    v['STATICLIB_MARKER'] = '-Wl,-Bstatic'
    v['FULLSTATIC_MARKER'] = '-static'
    # program
    v['program_PATTERN'] = '%s'
    # shared library
    v['shlib_CCFLAGS'] = ['-fPIC', '-DPIC'] # avoid using -DPIC, -fPIC aleady defines the __PIC__ macro
    v['shlib_LINKFLAGS'] = ['-shared']
    v['shlib_PATTERN'] = 'lib%s.so'
    # static lib
    v['staticlib_LINKFLAGS'] = ['-Wl,-Bstatic']
    v['staticlib_PATTERN'] = 'lib%s.a'
    # osx stuff
    v['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup']
    v['CCFLAGS_MACBUNDLE'] = ['-fPIC']
    v['macbundle_PATTERN'] = '%s.bundle'
@conftest
def gcc_modifier_win32(conf):
    """Adjust the gcc settings for native Windows (PE) targets."""
    v = conf.env
    v['program_PATTERN'] = '%s.exe'
    v['shlib_PATTERN'] = '%s.dll'
    v['implib_PATTERN'] = 'lib%s.dll.a'
    v['IMPLIB_ST'] = '-Wl,--out-implib,%s'
    # FIX: removed the unused local 'dest_arch' (v['DEST_CPU'] was read into
    # it but never used anywhere in this function)
    v['shlib_CCFLAGS'] = ['-DPIC']
    v.append_value('shlib_CCFLAGS', '-DDLL_EXPORT') # TODO adding nonstandard defines like this DLL_EXPORT is not a good idea
    # Auto-import is enabled by default even without this option,
    # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages
    # that the linker emits otherwise.
    v.append_value('LINKFLAGS', '-Wl,--enable-auto-import')
@conftest
def gcc_modifier_cygwin(conf):
    # cygwin behaves like win32 except for the dll naming scheme
    gcc_modifier_win32(conf)
    v = conf.env
    v['shlib_PATTERN'] = 'cyg%s.dll'
    v.append_value('shlib_LINKFLAGS', '-Wl,--enable-auto-image-base')
@conftest
def gcc_modifier_darwin(conf):
    # macOS: dylibs, no -Bstatic/-Bdynamic markers, no soname flag
    v = conf.env
    v['shlib_CCFLAGS'] = ['-fPIC', '-compatibility_version', '1', '-current_version', '1']
    v['shlib_LINKFLAGS'] = ['-dynamiclib']
    v['shlib_PATTERN'] = 'lib%s.dylib'
    v['staticlib_LINKFLAGS'] = []
    v['SHLIB_MARKER'] = ''
    v['STATICLIB_MARKER'] = ''
    v['SONAME_ST'] = ''
@conftest
def gcc_modifier_aix(conf):
    # AIX: runtime linking must be enabled explicitly
    v = conf.env
    v['program_LINKFLAGS'] = ['-Wl,-brtl']
    v['shlib_LINKFLAGS'] = ['-shared','-Wl,-brtl,-bexpfull']
    v['SHLIB_MARKER'] = ''
@conftest
def gcc_modifier_platform(conf):
    """Apply the platform-specific gcc tweaks (gcc_modifier_<os>), if any.

    The destination platform is detected from the macros the compiler
    predefines; when unrecognised it falls back to sys.platform."""
    dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform()
    modifier = globals().get('gcc_modifier_' + dest_os)
    if modifier:
        modifier(conf)
def detect(conf):
    # configure the full gcc tool chain: compiler, preprocessor, archiver,
    # baseline flags and platform-specific tweaks
    conf.find_gcc()
    conf.find_cpp()
    conf.find_ar()
    conf.gcc_common_flags()
    conf.gcc_modifier_platform()
    conf.cc_load_tools()
    conf.cc_add_flags()
    conf.link_add_flags()
ntdb-1.0/buildtools/wafadmin/Tools/gdc.py 0000664 0000000 0000000 00000002270 12241515307 0020457 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2007 (dv)
import sys
import Utils, ar
from Configure import conftest
@conftest
def find_gdc(conf):
    # locate the GNU D compiler and store it as D_COMPILER
    conf.find_program('gdc', var='D_COMPILER', mandatory=True)
@conftest
def common_flags_gdc(conf):
    # baseline flag templates for gdc (gcc-style single-dash options)
    v = conf.env
    # _DFLAGS _DIMPORTFLAGS
    # for mory info about the meaning of this dict see dmd.py
    v['DFLAGS'] = []
    v['D_SRC_F'] = ''
    v['D_TGT_F'] = ['-c', '-o', '']
    v['DPATH_ST'] = '-I%s' # template for adding import paths
    # linker
    v['D_LINKER'] = v['D_COMPILER']
    v['DLNK_SRC_F'] = ''
    v['DLNK_TGT_F'] = ['-o', '']
    v['DLIB_ST'] = '-l%s' # template for adding libs
    v['DLIBPATH_ST'] = '-L%s' # template for adding libpaths
    # debug levels
    v['DLINKFLAGS'] = []
    v['DFLAGS_OPTIMIZED'] = ['-O3']
    v['DFLAGS_DEBUG'] = ['-O0']
    v['DFLAGS_ULTRADEBUG'] = ['-O0']
    v['D_shlib_DFLAGS'] = []
    v['D_shlib_LINKFLAGS'] = ['-shared']
    v['DHEADER_ext'] = '.di'
    v['D_HDR_F'] = '-fintfc -fintfc-file='
def detect(conf):
    # configure the gdc tool chain
    conf.find_gdc()
    conf.check_tool('ar')
    conf.check_tool('d')
    conf.common_flags_gdc()
    conf.d_platform_flags()
ntdb-1.0/buildtools/wafadmin/Tools/glib2.py 0000664 0000000 0000000 00000011660 12241515307 0020724 0 ustar 00root root 0000000 0000000 #! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2008 (ita)
"GLib2 support"
import Task, Utils
from TaskGen import taskgen, before, after, feature
#
# glib-genmarshal
#
@taskgen
def add_marshal_file(self, filename, prefix):
    """Queue a marshal list file for processing by glib-genmarshal."""
    if not hasattr(self, 'marshal_list'):
        self.marshal_list = []
    # schedule the method that turns the queue into tasks
    self.meths.append('process_marshal')
    self.marshal_list.append((filename, prefix))
@before('apply_core')
def process_marshal(self):
    """Create a glib_genmarshal task per file queued by add_marshal_file()."""
    for f, prefix in getattr(self, 'marshal_list', []):
        node = self.path.find_resource(f)
        if not node:
            raise Utils.WafError('file not found %r' % f)
        # the tool produces a header and a C file; compile the C file too
        h_node = node.change_ext('.h')
        c_node = node.change_ext('.c')
        task = self.create_task('glib_genmarshal', node, [h_node, c_node])
        task.env.GLIB_GENMARSHAL_PREFIX = prefix
        self.allnodes.append(c_node)
def genmarshal_func(self):
    # Task function: run glib-genmarshal twice, first for the header then for
    # the body; the body file starts with an #include of the generated header.
    bld = self.inputs[0].__class__.bld
    get = self.env.get_flat
    cmd1 = "%s %s --prefix=%s --header > %s" % (
        get('GLIB_GENMARSHAL'),
        self.inputs[0].srcpath(self.env),
        get('GLIB_GENMARSHAL_PREFIX'),
        self.outputs[0].abspath(self.env)
    )
    ret = bld.exec_command(cmd1)
    if ret: return ret
    #print self.outputs[1].abspath(self.env)
    f = open(self.outputs[1].abspath(self.env), 'wb')
    c = '''#include "%s"\n''' % self.outputs[0].name
    f.write(c)
    f.close()
    cmd2 = "%s %s --prefix=%s --body >> %s" % (
        get('GLIB_GENMARSHAL'),
        self.inputs[0].srcpath(self.env),
        get('GLIB_GENMARSHAL_PREFIX'),
        self.outputs[1].abspath(self.env)
    )
    # NOTE(review): cmd1 goes through bld.exec_command while cmd2 goes through
    # Utils.exec_command — presumably equivalent here, but verify intent
    ret = Utils.exec_command(cmd2)
    if ret: return ret
#
# glib-mkenums
#
@taskgen
def add_enums_from_template(self, source='', target='', template='', comments=''):
    # queue a glib-mkenums run driven by a template file; the per-section
    # options stay empty because the template provides them
    if not hasattr(self, 'enums_list'):
        self.enums_list = []
    self.meths.append('process_enums')
    self.enums_list.append({'source': source,
                            'target': target,
                            'template': template,
                            'file-head': '',
                            'file-prod': '',
                            'file-tail': '',
                            'enum-prod': '',
                            'value-head': '',
                            'value-prod': '',
                            'value-tail': '',
                            'comments': comments})
@taskgen
def add_enums(self, source='', target='',
              file_head='', file_prod='', file_tail='', enum_prod='',
              value_head='', value_prod='', value_tail='', comments=''):
    # queue a glib-mkenums run with inline production strings (no template)
    if not hasattr(self, 'enums_list'):
        self.enums_list = []
    self.meths.append('process_enums')
    self.enums_list.append({'source': source,
                            'template': '',
                            'target': target,
                            'file-head': file_head,
                            'file-prod': file_prod,
                            'file-tail': file_tail,
                            'enum-prod': enum_prod,
                            'value-head': value_head,
                            'value-prod': value_prod,
                            'value-tail': value_tail,
                            'comments': comments})
@before('apply_core')
def process_enums(self):
    # turn every queued enum description into a glib_mkenums task
    for enum in getattr(self, 'enums_list', []):
        task = self.create_task('glib_mkenums')
        env = task.env
        inputs = []
        # process the source
        source_list = self.to_list(enum['source'])
        if not source_list:
            raise Utils.WafError('missing source ' + str(enum))
        source_list = [self.path.find_resource(k) for k in source_list]
        inputs += source_list
        env['GLIB_MKENUMS_SOURCE'] = [k.srcpath(env) for k in source_list]
        # find the target
        if not enum['target']:
            raise Utils.WafError('missing target ' + str(enum))
        tgt_node = self.path.find_or_declare(enum['target'])
        # a generated .c file must also be scheduled for compilation
        if tgt_node.name.endswith('.c'):
            self.allnodes.append(tgt_node)
        env['GLIB_MKENUMS_TARGET'] = tgt_node.abspath(env)
        options = []
        if enum['template']: # template, if provided
            template_node = self.path.find_resource(enum['template'])
            options.append('--template %s' % (template_node.abspath(env)))
            inputs.append(template_node)
        # map the queue-entry keys to the glib-mkenums command-line options
        params = {'file-head' : '--fhead',
                  'file-prod' : '--fprod',
                  'file-tail' : '--ftail',
                  'enum-prod' : '--eprod',
                  'value-head' : '--vhead',
                  'value-prod' : '--vprod',
                  'value-tail' : '--vtail',
                  'comments': '--comments'}
        for param, option in params.iteritems():
            if enum[param]:
                options.append('%s %r' % (option, enum[param]))
        env['GLIB_MKENUMS_OPTIONS'] = ' '.join(options)
        # update the task instance
        task.set_inputs(inputs)
        task.set_outputs(tgt_node)
# register the glib code-generation tasks; both must run before compilation
Task.task_type_from_func('glib_genmarshal', func=genmarshal_func, vars=['GLIB_GENMARSHAL_PREFIX', 'GLIB_GENMARSHAL'],
    color='BLUE', before='cc cxx')
Task.simple_task_type('glib_mkenums',
    '${GLIB_MKENUMS} ${GLIB_MKENUMS_OPTIONS} ${GLIB_MKENUMS_SOURCE} > ${GLIB_MKENUMS_TARGET}',
    color='PINK', before='cc cxx')
def detect(conf):
    # locate the glib code generators
    glib_genmarshal = conf.find_program('glib-genmarshal', var='GLIB_GENMARSHAL')
    mk_enums_tool = conf.find_program('glib-mkenums', var='GLIB_MKENUMS')
ntdb-1.0/buildtools/wafadmin/Tools/gnome.py 0000664 0000000 0000000 00000017101 12241515307 0021026 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2008 (ita)
"Gnome support"
import os, re
import TaskGen, Utils, Runner, Task, Build, Options, Logs
import cc
from Logs import error
from TaskGen import taskgen, before, after, feature
# NOTE(review): both patterns are the identical '(.*)' — in upstream waf these
# match specific SGML tags (e.g. refentrytitle / manvolnum); the angle-bracket
# markup appears to have been stripped from this copy. Verify against upstream
# before relying on sgml_scan() below.
n1_regexp = re.compile('(.*)', re.M)
n2_regexp = re.compile('(.*)', re.M)
def postinstall_schemas(prog_name):
    """Register the GConf schema after 'waf install', or tell the user how."""
    if not Build.bld.is_install:
        return
    dir = Build.bld.get_install_path('${PREFIX}/etc/gconf/schemas/%s.schemas' % prog_name)
    if not Options.options.destdir:
        # add the gconf schema
        Utils.pprint('YELLOW', 'Installing GConf schema')
        command = 'gconftool-2 --install-schema-file=%s 1> /dev/null' % dir
        ret = Utils.exec_command(command)
    else:
        # a staged install cannot touch the live gconf database
        Utils.pprint('YELLOW', 'GConf schema not installed. After install, run this:')
        Utils.pprint('YELLOW', 'gconftool-2 --install-schema-file=%s' % dir)
def postinstall_icons():
    """Refresh the GTK icon cache after 'waf install', or tell the user how."""
    dir = Build.bld.get_install_path('${DATADIR}/icons/hicolor')
    if not Build.bld.is_install:
        return
    if not Options.options.destdir:
        # update the pixmap cache directory
        Utils.pprint('YELLOW', "Updating Gtk icon cache.")
        command = 'gtk-update-icon-cache -q -f -t %s' % dir
        ret = Utils.exec_command(command)
    else:
        # a staged install cannot touch the live cache
        Utils.pprint('YELLOW', 'Icon cache not updated. After install, run this:')
        Utils.pprint('YELLOW', 'gtk-update-icon-cache -q -f -t %s' % dir)
def postinstall_scrollkeeper(prog_name):
    # refresh the scrollkeeper documentation catalog after 'waf install'
    if Build.bld.is_install:
        # now the scrollkeeper update if we can write to the log file
        if os.access('/var/log/scrollkeeper.log', os.W_OK):
            dir1 = Build.bld.get_install_path('${PREFIX}/var/scrollkeeper')
            dir2 = Build.bld.get_install_path('${DATADIR}/omf/%s' % prog_name)
            command = 'scrollkeeper-update -q -p %s -o %s' % (dir1, dir2)
            ret = Utils.exec_command(command)
def postinstall(prog_name='myapp', schemas=1, icons=1, scrollkeeper=1):
    """Run the selected GNOME post-install hooks."""
    if schemas:
        postinstall_schemas(prog_name)
    if icons:
        postinstall_icons()
    if scrollkeeper:
        postinstall_scrollkeeper(prog_name)
# OBSOLETE
class gnome_doc_taskgen(TaskGen.task_gen):
    # OBSOLETE compatibility shim; the 'gnome_doc' feature does the work
    def __init__(self, *k, **kw):
        TaskGen.task_gen.__init__(self, *k, **kw)
@feature('gnome_doc')
def init_gnome_doc(self):
    # default install prefix for the generated documentation
    self.default_install_path = '${PREFIX}/share'
@feature('gnome_doc')
@after('init_gnome_doc')
def apply_gnome_doc(self):
    # For each language (doc_linguas plus 'C'): translate the C document via
    # xml2po where needed, generate the .omf via xsltproc2po, and install the
    # documents and figures when running 'waf install'.
    self.env['APPNAME'] = self.doc_module
    lst = self.to_list(self.doc_linguas)
    bld = self.bld
    lst.append('C')
    for x in lst:
        if not x == 'C':
            # translated documents are generated from the C master + .po file
            tsk = self.create_task('xml2po')
            node = self.path.find_resource(x+'/'+x+'.po')
            src = self.path.find_resource('C/%s.xml' % self.doc_module)
            out = self.path.find_or_declare('%s/%s.xml' % (x, self.doc_module))
            tsk.set_inputs([node, src])
            tsk.set_outputs(out)
        else:
            out = self.path.find_resource('%s/%s.xml' % (x, self.doc_module))
        # NOTE(review): for x == 'C' the later tsk2.run_after.append(tsk)
        # presumably references the tsk of a previous iteration — verify
        tsk2 = self.create_task('xsltproc2po')
        out2 = self.path.find_or_declare('%s/%s-%s.omf' % (x, self.doc_module, x))
        tsk2.set_outputs(out2)
        node = self.path.find_resource(self.doc_module+".omf.in")
        tsk2.inputs = [node, out]
        tsk2.run_after.append(tsk)
        if bld.is_install:
            path = self.install_path + '/gnome/help/%s/%s' % (self.doc_module, x)
            bld.install_files(self.install_path + '/omf', out2, env=self.env)
            for y in self.to_list(self.doc_figures):
                try:
                    # install the localized figure when it exists, else the C one
                    os.stat(self.path.abspath() + '/' + x + '/' + y)
                    bld.install_as(path + '/' + y, self.path.abspath() + '/' + x + '/' + y)
                except:
                    bld.install_as(path + '/' + y, self.path.abspath() + '/C/' + y)
            bld.install_as(path + '/%s.xml' % self.doc_module, out.abspath(self.env))
            if x == 'C':
                # the C tree also carries the includes and entity files
                xmls = self.to_list(self.doc_includes)
                xmls.append(self.doc_entities)
                for z in xmls:
                    out = self.path.find_resource('%s/%s' % (x, z))
                    bld.install_as(path + '/%s' % z, out.abspath(self.env))
# OBSOLETE
class xml_to_taskgen(TaskGen.task_gen):
def __init__(self, *k, **kw):
TaskGen.task_gen.__init__(self, *k, **kw)
@feature('xml_to')
def init_xml_to(self):
    # placeholder defaults overridden by the user's wscript
    # NOTE(review): 'xlsltfile' looks like a typo for 'xsltfile', but it is
    # only a placeholder default — confirm before changing
    Utils.def_attrs(self,
        source = 'xmlfile',
        xslt = 'xlsltfile',
        target = 'hey',
        default_install_path = '${PREFIX}',
        task_created = None)
@feature('xml_to')
@after('init_xml_to')
def apply_xml_to(self):
    """Create an xmlto task transforming self.source with self.xslt."""
    xml_node = self.path.find_resource(self.source)
    xsl_node = self.path.find_resource(self.xslt)
    task = self.create_task('xmlto', [xml_node, xsl_node], xml_node.change_ext('html'))
    task.install_path = self.install_path
def sgml_scan(self):
    # Task scanner: read the sgml input and derive the output document name
    # from the title and section number found in the content.
    node = self.inputs[0]
    env = self.env
    variant = node.variant(env)  # NOTE(review): computed but never used here
    fi = open(node.abspath(env), 'r')
    content = fi.read()
    fi.close()
    # we should use a sgml parser :-/
    name = n1_regexp.findall(content)[0]
    num = n2_regexp.findall(content)[0]
    doc_name = name+'.'+num
    if not self.outputs:
        self.outputs = [self.generator.path.find_or_declare(doc_name)]
    # no resolved nodes; the doc name is reported as a dependency name
    return ([], [doc_name])
class gnome_sgml2man_taskgen(TaskGen.task_gen):
    # compatibility shim; the 'gnome_sgml2man' feature does the work
    def __init__(self, *k, **kw):
        TaskGen.task_gen.__init__(self, *k, **kw)
@feature('gnome_sgml2man')
def apply_gnome_sgml2man(self):
    """
    we could make it more complicated, but for now we just scan the document each time
    """
    assert(getattr(self, 'appname', None))
    # install hook: the man section is the last character of the output name
    def install_result(task):
        out = task.outputs[0]
        name = out.name
        ext = name[-1]
        env = task.env
        self.bld.install_files('${DATADIR}/man/man%s/' % ext, out, env)
    # create one sgml2man task per .sgml file found in this directory
    self.bld.rescan(self.path)
    for name in self.bld.cache_dir_contents[self.path.id]:
        base, ext = os.path.splitext(name)
        if ext != '.sgml': continue
        task = self.create_task('sgml2man')
        task.set_inputs(self.path.find_resource(name))
        task.task_generator = self
        if self.bld.is_install: task.install = install_result
        # no outputs, the scanner does it
        # no caching for now, this is not a time-critical feature
        # in the future the scanner can be used to do more things (find dependencies, etc)
        task.scan()
cls = Task.simple_task_type('sgml2man', '${SGML2MAN} -o ${TGT[0].bld_dir(env)} ${SRC} > /dev/null', color='BLUE')
cls.scan = sgml_scan
cls.quiet = 1
Task.simple_task_type('xmlto', '${XMLTO} html -m ${SRC[1].abspath(env)} ${SRC[0].abspath(env)}')
Task.simple_task_type('xml2po', '${XML2PO} ${XML2POFLAGS} ${SRC} > ${TGT}', color='BLUE')
# how do you expect someone to understand this?!
xslt_magic = """${XSLTPROC2PO} -o ${TGT[0].abspath(env)} \
--stringparam db2omf.basename ${APPNAME} \
--stringparam db2omf.format docbook \
--stringparam db2omf.lang ${TGT[0].abspath(env)[:-4].split('-')[-1]} \
--stringparam db2omf.dtd '-//OASIS//DTD DocBook XML V4.3//EN' \
--stringparam db2omf.omf_dir ${PREFIX}/share/omf \
--stringparam db2omf.help_dir ${PREFIX}/share/gnome/help \
--stringparam db2omf.omf_in ${SRC[0].abspath(env)} \
--stringparam db2omf.scrollkeeper_cl ${SCROLLKEEPER_DATADIR}/Templates/C/scrollkeeper_cl.xml \
${DB2OMF} ${SRC[1].abspath(env)}"""
#--stringparam db2omf.dtd '-//OASIS//DTD DocBook XML V4.3//EN' \
Task.simple_task_type('xsltproc2po', xslt_magic, color='BLUE')
def detect(conf):
    # configure the GNOME helper tools (docbook2man, xml2po, xsltproc, ...)
    conf.check_tool('gnu_dirs glib2 dbus')
    sgml2man = conf.find_program('docbook2man', var='SGML2MAN')
    def getstr(varname):
        return getattr(Options.options, varname, '')
    # addefine also sets the variable to the env
    conf.define('GNOMELOCALEDIR', os.path.join(conf.env['DATADIR'], 'locale'))
    xml2po = conf.find_program('xml2po', var='XML2PO')
    xsltproc2po = conf.find_program('xsltproc', var='XSLTPROC2PO')
    conf.env['XML2POFLAGS'] = '-e -p'
    # query the external tools for their data directories
    conf.env['SCROLLKEEPER_DATADIR'] = Utils.cmd_output("scrollkeeper-config --pkgdatadir", silent=1).strip()
    conf.env['DB2OMF'] = Utils.cmd_output("/usr/bin/pkg-config --variable db2omf gnome-doc-utils", silent=1).strip()
def set_options(opt):
    # command-line option for enabling/disabling rpath
    opt.add_option('--want-rpath', type='int', default=1, dest='want_rpath', help='set rpath to 1 or 0 [Default 1]')
ntdb-1.0/buildtools/wafadmin/Tools/gnu_dirs.py 0000664 0000000 0000000 00000010034 12241515307 0021531 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Ali Sabil, 2007
"""
To use this module do not forget to call
opt.tool_options('gnu_dirs')
AND
conf.check_tool('gnu_dirs')
Add options for the standard GNU directories, this tool will add the options
found in autotools, and will update the environment with the following
installation variables:
* PREFIX : architecture-independent files [/usr/local]
* EXEC_PREFIX : architecture-dependent files [PREFIX]
* BINDIR : user executables [EXEC_PREFIX/bin]
* SBINDIR : user executables [EXEC_PREFIX/sbin]
* LIBEXECDIR : program executables [EXEC_PREFIX/libexec]
* SYSCONFDIR : read-only single-machine data [PREFIX/etc]
* SHAREDSTATEDIR : modifiable architecture-independent data [PREFIX/com]
* LOCALSTATEDIR : modifiable single-machine data [PREFIX/var]
* LIBDIR : object code libraries [EXEC_PREFIX/lib]
* INCLUDEDIR : C header files [PREFIX/include]
* OLDINCLUDEDIR : C header files for non-gcc [/usr/include]
* DATAROOTDIR : read-only arch.-independent data root [PREFIX/share]
* DATADIR : read-only architecture-independent data [DATAROOTDIR]
* INFODIR : info documentation [DATAROOTDIR/info]
* LOCALEDIR : locale-dependent data [DATAROOTDIR/locale]
* MANDIR : man documentation [DATAROOTDIR/man]
* DOCDIR : documentation root [DATAROOTDIR/doc/telepathy-glib]
* HTMLDIR : html documentation [DOCDIR]
* DVIDIR : dvi documentation [DOCDIR]
* PDFDIR : pdf documentation [DOCDIR]
* PSDIR : ps documentation [DOCDIR]
"""
import Utils, Options
# Table of (option-name, description, default) triples describing the GNU
# installation directories; the ${VARS} placeholders are substituted against
# the configuration environment later, in detect().
_options = [x.split(', ') for x in '''
bindir, user executables, ${EXEC_PREFIX}/bin
sbindir, system admin executables, ${EXEC_PREFIX}/sbin
libexecdir, program executables, ${EXEC_PREFIX}/libexec
sysconfdir, read-only single-machine data, ${PREFIX}/etc
sharedstatedir, modifiable architecture-independent data, ${PREFIX}/com
localstatedir, modifiable single-machine data, ${PREFIX}/var
libdir, object code libraries, ${EXEC_PREFIX}/lib
includedir, C header files, ${PREFIX}/include
oldincludedir, C header files for non-gcc, /usr/include
datarootdir, read-only arch.-independent data root, ${PREFIX}/share
datadir, read-only architecture-independent data, ${DATAROOTDIR}
infodir, info documentation, ${DATAROOTDIR}/info
localedir, locale-dependent data, ${DATAROOTDIR}/locale
mandir, man documentation, ${DATAROOTDIR}/man
docdir, documentation root, ${DATAROOTDIR}/doc/${PACKAGE}
htmldir, html documentation, ${DOCDIR}
dvidir, dvi documentation, ${DOCDIR}
pdfdir, pdf documentation, ${DOCDIR}
psdir, ps documentation, ${DOCDIR}
'''.split('\n') if x]
def detect(conf):
    """Resolve every GNU directory variable into conf.env, expanding ${VARS}.

    Command-line values (from set_options below) take precedence over the
    defaults from the _options table.  Because a default may reference a
    variable that is itself not resolved yet, the substitution loop runs up
    to len(_options)+1 passes before giving up.
    """
    def get_param(varname, default):
        # command-line value if given, otherwise the table default
        return getattr(Options.options, varname, '') or default
    env = conf.env
    env['EXEC_PREFIX'] = get_param('EXEC_PREFIX', env['PREFIX'])
    env['PACKAGE'] = Utils.g_module.APPNAME
    complete = False
    iter = 0
    while not complete and iter < len(_options) + 1:
        iter += 1
        complete = True
        for name, help, default in _options:
            name = name.upper()
            if not env[name]:
                try:
                    env[name] = Utils.subst_vars(get_param(name, default), env)
                except TypeError:
                    # a referenced variable is still unresolved; retry next pass
                    complete = False
    if not complete:
        lst = [name for name, _, _ in _options if not env[name.upper()]]
        raise Utils.WafError('Variable substitution failure %r' % lst)
def set_options(opt):
    """Add the autotools-style --bindir/--libdir/... options to the parser."""
    # move --prefix/--destdir into a dedicated option group
    inst_dir = opt.add_option_group('Installation directories',
'By default, "waf install" will put the files in\
 "/usr/local/bin", "/usr/local/lib" etc. An installation prefix other\
 than "/usr/local" can be given using "--prefix", for example "--prefix=$HOME"')
    for k in ('--prefix', '--destdir'):
        option = opt.parser.get_option(k)
        if option:
            opt.parser.remove_option(k)
            inst_dir.add_option(option)
    inst_dir.add_option('--exec-prefix',
        help = 'installation prefix [Default: ${PREFIX}]',
        default = '',
        dest = 'EXEC_PREFIX')
    # one option per row of the _options table
    dirs_options = opt.add_option_group('Pre-defined installation directories', '')
    for name, help, default in _options:
        option_name = '--' + name
        str_default = default
        str_help = '%s [Default: %s]' % (help, str_default)
        dirs_options.add_option(option_name, help=str_help, default='', dest=name.upper())
ntdb-1.0/buildtools/wafadmin/Tools/gob2.py 0000664 0000000 0000000 00000000536 12241515307 0020556 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Ali Sabil, 2007
import TaskGen
TaskGen.declare_chain(
name = 'gob2',
rule = '${GOB2} -o ${TGT[0].bld_dir(env)} ${GOB2FLAGS} ${SRC}',
ext_in = '.gob',
ext_out = '.c'
)
def detect(conf):
    """Locate the gob2 code generator (mandatory) and initialise its flags."""
    conf.env['GOB2'] = conf.find_program('gob2', var='GOB2', mandatory=True)
    conf.env['GOB2FLAGS'] = ''
ntdb-1.0/buildtools/wafadmin/Tools/gxx.py 0000664 0000000 0000000 00000007506 12241515307 0020537 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
# Ralf Habacker, 2006 (rh)
# Yinon Ehrlich, 2009
import os, sys
import Configure, Options, Utils
import ccroot, ar
from Configure import conftest
@conftest
def find_gxx(conf):
    """Detect g++ (or c++), record its version and store it in the env."""
    compiler = conf.find_program(['g++', 'c++'], var='CXX', mandatory=True)
    compiler = conf.cmd_to_list(compiler)
    ccroot.get_cc_version(conf, compiler, gcc=True)
    conf.env.CXX_NAME = 'gcc'
    conf.env.CXX = compiler
@conftest
def gxx_common_flags(conf):
    """Fill conf.env with the flag templates and file patterns shared by all
    g++ platforms; the gxx_modifier_* functions override these per-OS."""
    v = conf.env
    # CPPFLAGS CXXDEFINES _CXXINCFLAGS _CXXDEFFLAGS
    v['CXXFLAGS_DEBUG'] = ['-g']
    v['CXXFLAGS_RELEASE'] = ['-O2']
    v['CXX_SRC_F'] = ''
    v['CXX_TGT_F'] = ['-c', '-o', ''] # shell hack for -MD
    v['CPPPATH_ST'] = '-I%s' # template for adding include paths
    # linker
    if not v['LINK_CXX']: v['LINK_CXX'] = v['CXX']
    v['CXXLNK_SRC_F'] = ''
    v['CXXLNK_TGT_F'] = ['-o', ''] # shell hack for -MD
    v['LIB_ST'] = '-l%s' # template for adding libs
    v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
    v['STATICLIB_ST'] = '-l%s'
    v['STATICLIBPATH_ST'] = '-L%s'
    v['RPATH_ST'] = '-Wl,-rpath,%s'
    v['CXXDEFINES_ST'] = '-D%s'
    v['SONAME_ST'] = '-Wl,-h,%s'
    v['SHLIB_MARKER'] = '-Wl,-Bdynamic'
    v['STATICLIB_MARKER'] = '-Wl,-Bstatic'
    v['FULLSTATIC_MARKER'] = '-static'
    # program
    v['program_PATTERN'] = '%s'
    # shared library
    v['shlib_CXXFLAGS'] = ['-fPIC', '-DPIC'] # avoid using -DPIC, -fPIC aleady defines the __PIC__ macro
    v['shlib_LINKFLAGS'] = ['-shared']
    v['shlib_PATTERN'] = 'lib%s.so'
    # static lib
    v['staticlib_LINKFLAGS'] = ['-Wl,-Bstatic']
    v['staticlib_PATTERN'] = 'lib%s.a'
    # osx stuff
    v['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup']
    v['CCFLAGS_MACBUNDLE'] = ['-fPIC']
    v['macbundle_PATTERN'] = '%s.bundle'
@conftest
def gxx_modifier_win32(conf):
    """Adjust the g++ environment for native win32 targets (.exe/.dll naming,
    import libraries, auto-import)."""
    v = conf.env
    v['program_PATTERN'] = '%s.exe'
    v['shlib_PATTERN'] = '%s.dll'
    v['implib_PATTERN'] = 'lib%s.dll.a'
    v['IMPLIB_ST'] = '-Wl,--out-implib,%s'
    # (removed an unused local that read DEST_CPU but never used it)
    v['shlib_CXXFLAGS'] = []
    v.append_value('shlib_CXXFLAGS', '-DDLL_EXPORT') # TODO adding nonstandard defines like this DLL_EXPORT is not a good idea
    # Auto-import is enabled by default even without this option,
    # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages
    # that the linker emits otherwise.
    v.append_value('LINKFLAGS', '-Wl,--enable-auto-import')
@conftest
def gxx_modifier_cygwin(conf):
    # Cygwin starts from the win32 settings, then switches to the cyg* DLL naming.
    gxx_modifier_win32(conf)
    v = conf.env
    v['shlib_PATTERN'] = 'cyg%s.dll'
    v.append_value('shlib_LINKFLAGS', '-Wl,--enable-auto-image-base')
@conftest
def gxx_modifier_darwin(conf):
    # Mac OS X: dylib naming, no soname or static/dynamic link markers.
    v = conf.env
    v['shlib_CXXFLAGS'] = ['-fPIC', '-compatibility_version', '1', '-current_version', '1']
    v['shlib_LINKFLAGS'] = ['-dynamiclib']
    v['shlib_PATTERN'] = 'lib%s.dylib'
    v['staticlib_LINKFLAGS'] = []
    v['SHLIB_MARKER'] = ''
    v['STATICLIB_MARKER'] = ''
    v['SONAME_ST'] = ''
@conftest
def gxx_modifier_aix(conf):
    # AIX: request runtime linking explicitly via -brtl.
    v = conf.env
    v['program_LINKFLAGS'] = ['-Wl,-brtl']
    v['shlib_LINKFLAGS'] = ['-shared', '-Wl,-brtl,-bexpfull']
    v['SHLIB_MARKER'] = ''
@conftest
def gxx_modifier_platform(conf):
    # * set configurations specific for a platform.
    # * the destination platform is detected automatically by looking at the macros the compiler predefines,
    #   and if it's not recognised, it fallbacks to sys.platform.
    dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform()
    gxx_modifier_func = globals().get('gxx_modifier_' + dest_os)
    if gxx_modifier_func:
        gxx_modifier_func(conf)
def detect(conf):
    # Standard g++ configuration sequence for this waf tool.
    conf.find_gxx()
    conf.find_cpp()
    conf.find_ar()
    conf.gxx_common_flags()
    conf.gxx_modifier_platform()
    conf.cxx_load_tools()
    conf.cxx_add_flags()
    conf.link_add_flags()
ntdb-1.0/buildtools/wafadmin/Tools/icc.py 0000664 0000000 0000000 00000001435 12241515307 0020462 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Stian Selnes, 2008
# Thomas Nagy 2009
import os, sys
import Configure, Options, Utils
import ccroot, ar, gcc
from Configure import conftest
@conftest
def find_icc(conf):
    """Locate the Intel C compiler: honour an existing CC (env or process
    environment), then try 'icc' and 'ICL' on the PATH."""
    if sys.platform == 'cygwin':
        conf.fatal('The Intel compiler does not work on Cygwin')
    v = conf.env
    cc = None
    if v['CC']: cc = v['CC']
    elif 'CC' in conf.environ: cc = conf.environ['CC']
    if not cc: cc = conf.find_program('icc', var='CC')
    if not cc: cc = conf.find_program('ICL', var='CC')
    if not cc: conf.fatal('Intel C Compiler (icc) was not found')
    cc = conf.cmd_to_list(cc)
    ccroot.get_cc_version(conf, cc, icc=True)
    v['CC'] = cc
    v['CC_NAME'] = 'icc'
# waf runs the conftest methods named in this string, in order, at configure time
detect = '''
find_icc
find_ar
gcc_common_flags
gcc_modifier_platform
cc_load_tools
cc_add_flags
link_add_flags
'''
ntdb-1.0/buildtools/wafadmin/Tools/icpc.py 0000664 0000000 0000000 00000001355 12241515307 0020643 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy 2009
import os, sys
import Configure, Options, Utils
import ccroot, ar, gxx
from Configure import conftest
@conftest
def find_icpc(conf):
    """Locate the Intel C++ compiler: honour an existing CXX (env or process
    environment), then try 'icpc' on the PATH."""
    if sys.platform == 'cygwin':
        conf.fatal('The Intel compiler does not work on Cygwin')
    v = conf.env
    cxx = None
    if v['CXX']: cxx = v['CXX']
    elif 'CXX' in conf.environ: cxx = conf.environ['CXX']
    if not cxx: cxx = conf.find_program('icpc', var='CXX')
    if not cxx: conf.fatal('Intel C++ Compiler (icpc) was not found')
    cxx = conf.cmd_to_list(cxx)
    ccroot.get_cc_version(conf, cxx, icc=True)
    v['CXX'] = cxx
    v['CXX_NAME'] = 'icc'
# waf runs the conftest methods named in this string, in order, at configure time
detect = '''
find_icpc
find_ar
gxx_common_flags
gxx_modifier_platform
cxx_load_tools
cxx_add_flags
link_add_flags
'''
ntdb-1.0/buildtools/wafadmin/Tools/intltool.py 0000664 0000000 0000000 00000011241 12241515307 0021564 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
"intltool support"
import os, re
import Configure, TaskGen, Task, Utils, Runner, Options, Build, config_c
from TaskGen import feature, before, taskgen
from Logs import error
"""
Usage:
bld(features='intltool_in', source='a.po b.po', podir='po', cache='.intlcache', flags='')
"""
class intltool_in_taskgen(TaskGen.task_gen):
    """deprecated"""
    def __init__(self, *k, **kw):
        TaskGen.task_gen.__init__(self, *k, **kw)
@before('apply_core')
@feature('intltool_in')
def iapply_intltool_in_f(self):
    """Create one 'intltool' merge task per source file, replacing apply_core."""
    try: self.meths.remove('apply_core')
    except ValueError: pass
    for i in self.to_list(self.source):
        node = self.path.find_resource(i)
        podir = getattr(self, 'podir', 'po')
        podirnode = self.path.find_dir(podir)
        if not podirnode:
            # report and keep processing the remaining sources
            error("could not find the podir %r" % podir)
            continue
        cache = getattr(self, 'intlcache', '.intlcache')
        self.env['INTLCACHE'] = os.path.join(self.path.bldpath(self.env), podir, cache)
        self.env['INTLPODIR'] = podirnode.srcpath(self.env)
        self.env['INTLFLAGS'] = getattr(self, 'flags', ['-q', '-u', '-c'])
        # output node: same name with the trailing extension (e.g. '.in') stripped
        task = self.create_task('intltool', node, node.change_ext(''))
        task.install_path = self.install_path
class intltool_po_taskgen(TaskGen.task_gen):
    """deprecated"""
    def __init__(self, *k, **kw):
        TaskGen.task_gen.__init__(self, *k, **kw)
@feature('intltool_po')
def apply_intltool_po(self):
    """Read podir/LINGUAS and create one 'po' (msgfmt) task per listed locale;
    installed as <lang>/LC_MESSAGES/<appname>.mo under LOCALEDIR."""
    try: self.meths.remove('apply_core')
    except ValueError: pass
    self.default_install_path = '${LOCALEDIR}'
    appname = getattr(self, 'appname', 'set_your_app_name')
    podir = getattr(self, 'podir', '')
    def install_translation(task):
        # install hook: place the .mo under <lang>/LC_MESSAGES/
        out = task.outputs[0]
        filename = out.name
        (langname, ext) = os.path.splitext(filename)
        inst_file = langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo'
        self.bld.install_as(os.path.join(self.install_path, inst_file), out, self.env, self.chmod)
    linguas = self.path.find_resource(os.path.join(podir, 'LINGUAS'))
    if linguas:
        # scan LINGUAS file for locales to process
        file = open(linguas.abspath())
        langs = []
        for line in file.readlines():
            # ignore lines containing comments
            if not line.startswith('#'):
                langs += line.split()
        file.close()
        re_linguas = re.compile('[-a-zA-Z_@.]+')
        for lang in langs:
            # Make sure that we only process lines which contain locales
            if re_linguas.match(lang):
                node = self.path.find_resource(os.path.join(podir, re_linguas.match(lang).group() + '.po'))
                task = self.create_task('po')
                task.set_inputs(node)
                task.set_outputs(node.change_ext('.mo'))
                if self.bld.is_install: task.install = install_translation
    else:
        Utils.pprint('RED', "Error no LINGUAS file found in po directory")
Task.simple_task_type('po', '${POCOM} -o ${TGT} ${SRC}', color='BLUE', shell=False)
Task.simple_task_type('intltool',
'${INTLTOOL} ${INTLFLAGS} ${INTLCACHE} ${INTLPODIR} ${SRC} ${TGT}',
color='BLUE', after="cc_link cxx_link", shell=False)
def detect(conf):
    """Find msgfmt and intltool-merge (falling back to perl + a located
    intltool-merge script on win32) and define LOCALEDIR/DATADIR."""
    pocom = conf.find_program('msgfmt')
    if not pocom:
        # if msgfmt should not be mandatory, catch the thrown exception in your wscript
        conf.fatal('The program msgfmt (gettext) is mandatory!')
    conf.env['POCOM'] = pocom
    # NOTE: it is possible to set INTLTOOL in the environment, but it must not have spaces in it
    intltool = conf.find_program('intltool-merge', var='INTLTOOL')
    if not intltool:
        # if intltool-merge should not be mandatory, catch the thrown exception in your wscript
        if Options.platform == 'win32':
            perl = conf.find_program('perl', var='PERL')
            if not perl:
                conf.fatal('The program perl (required by intltool) could not be found')
            intltooldir = Configure.find_file('intltool-merge', os.environ['PATH'].split(os.pathsep))
            if not intltooldir:
                conf.fatal('The program intltool-merge (intltool, gettext-devel) is mandatory!')
            # run the script through perl explicitly
            conf.env['INTLTOOL'] = Utils.to_list(conf.env['PERL']) + [intltooldir + os.sep + 'intltool-merge']
            conf.check_message('intltool', '', True, ' '.join(conf.env['INTLTOOL']))
        else:
            conf.fatal('The program intltool-merge (intltool, gettext-devel) is mandatory!')
    def getstr(varname):
        return getattr(Options.options, varname, '')
    prefix = conf.env['PREFIX']
    datadir = getstr('datadir')
    if not datadir: datadir = os.path.join(prefix,'share')
    conf.define('LOCALEDIR', os.path.join(datadir, 'locale'))
    conf.define('DATADIR', datadir)
    if conf.env['CC'] or conf.env['CXX']:
        # define HAVE_LOCALE_H to 1 if <locale.h> is present
        conf.check(header_name='locale.h')
def set_options(opt):
    # configure-time knobs: rpath toggle and the data directory override
    opt.add_option('--want-rpath', type='int', default=1, dest='want_rpath', help='set rpath to 1 or 0 [Default 1]')
    opt.add_option('--datadir', type='string', default='', dest='datadir', help='read-only application data')
ntdb-1.0/buildtools/wafadmin/Tools/javaw.py 0000664 0000000 0000000 00000016321 12241515307 0021034 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2008 (ita)
"""
Java support
Javac is one of the few compilers that behaves very badly:
* it outputs files where it wants to (-d is only for the package root)
* it recompiles files silently behind your back
* it outputs an undefined amount of files (inner classes)
Fortunately, the convention makes it possible to use the build dir without
too many problems for the moment
Inner classes must be located and cleaned when a problem arise,
for the moment waf does not track the production of inner classes.
Adding all the files to a task and executing it if any of the input files
change is only annoying for the compilation times
Compilation can be run using Jython[1] rather than regular Python. Instead of
running one of the following commands:
./waf configure
python waf configure
You would have to run:
java -jar /path/to/jython.jar waf configure
[1] http://www.jython.org/
"""
import os, re
from Configure import conf
import TaskGen, Task, Utils, Options, Build
from TaskGen import feature, before, taskgen
class_check_source = '''
public class Test {
public static void main(String[] argv) {
Class lib;
if (argv.length < 1) {
System.err.println("Missing argument");
System.exit(77);
}
try {
lib = Class.forName(argv[0]);
} catch (ClassNotFoundException e) {
System.err.println("ClassNotFoundException");
System.exit(1);
}
lib = null;
System.exit(0);
}
}
'''
@feature('jar')
@before('apply_core')
def jar_files(self):
    """Create a 'jar_create' task that packs everything below *basedir*
    into *destfile* (default 'test.jar')."""
    basedir = getattr(self, 'basedir', '.')
    destfile = getattr(self, 'destfile', 'test.jar')
    jaropts = getattr(self, 'jaropts', [])
    jarcreate = getattr(self, 'jarcreate', 'cf')
    dir = self.path.find_dir(basedir)
    # bug fix: a bare 'raise' with no active exception produces a confusing
    # RuntimeError; report the actual configuration problem instead
    if not dir:
        raise Utils.WafError('jar_files: basedir %r was not found' % basedir)
    jaropts.append('-C')
    jaropts.append(dir.abspath(self.env))
    jaropts.append('.')
    out = self.path.find_or_declare(destfile)
    tsk = self.create_task('jar_create')
    tsk.set_outputs(out)
    # every file under basedir except the jar itself
    tsk.inputs = [x for x in dir.find_iter(src=0, bld=1) if x.id != out.id]
    tsk.env['JAROPTS'] = jaropts
    tsk.env['JARCREATE'] = jarcreate
@feature('javac')
@before('apply_core')
def apply_java(self):
    """Create the 'javac' compilation task (and optionally a 'jar_create'
    task when self.jarname is set) for all sources matching source_re."""
    Utils.def_attrs(self, jarname='', jaropts='', classpath='',
        sourcepath='.', srcdir='.', source_re='**/*.java',
        jar_mf_attributes={}, jar_mf_classpath=[])
    if getattr(self, 'source_root', None):
        # old stuff
        self.srcdir = self.source_root
    nodes_lst = []
    if not self.classpath:
        if not self.env['CLASSPATH']:
            self.env['CLASSPATH'] = '..' + os.pathsep + '.'
    else:
        self.env['CLASSPATH'] = self.classpath
    srcdir_node = self.path.find_dir(self.srcdir)
    if not srcdir_node:
        raise Utils.WafError('could not find srcdir %r' % self.srcdir)
    src_nodes = [x for x in srcdir_node.ant_glob(self.source_re, flat=False)]
    # one .class output per .java input (inner classes handled in post_run)
    bld_nodes = [x.change_ext('.class') for x in src_nodes]
    self.env['OUTDIR'] = [srcdir_node.bldpath(self.env)]
    tsk = self.create_task('javac')
    tsk.set_inputs(src_nodes)
    tsk.set_outputs(bld_nodes)
    if getattr(self, 'compat', None):
        tsk.env.append_value('JAVACFLAGS', ['-source', self.compat])
    if hasattr(self, 'sourcepath'):
        fold = [self.path.find_dir(x) for x in self.to_list(self.sourcepath)]
        names = os.pathsep.join([x.srcpath() for x in fold])
    else:
        names = srcdir_node.srcpath()
    if names:
        tsk.env.append_value('JAVACFLAGS', ['-sourcepath', names])
    if self.jarname:
        # jar the compiled classes after compilation finishes
        jtsk = self.create_task('jar_create', bld_nodes, self.path.find_or_declare(self.jarname))
        jtsk.set_run_after(tsk)
        if not self.env.JAROPTS:
            if self.jaropts:
                self.env.JAROPTS = self.jaropts
            else:
                dirs = '.'
                self.env.JAROPTS = ['-C', ''.join(self.env['OUTDIR']), dirs]
Task.simple_task_type('jar_create', '${JAR} ${JARCREATE} ${TGT} ${JAROPTS}', color='GREEN', shell=False)
cls = Task.simple_task_type('javac', '${JAVAC} -classpath ${CLASSPATH} -d ${OUTDIR} ${JAVACFLAGS} ${SRC}', shell=False)
cls.color = 'BLUE'
def post_run_javac(self):
    """this is for cleaning the folder
    javac creates single files for inner classes
    but it is not possible to know which inner classes in advance"""
    # collect the distinct parent directories of the inputs
    par = {}
    for x in self.inputs:
        par[x.parent.id] = x.parent
    # any produced file with a '$' in its name is an inner class
    inner = {}
    for k in par.values():
        path = k.abspath(self.env)
        lst = os.listdir(path)
        for u in lst:
            if u.find('$') >= 0:
                inner_class_node = k.find_or_declare(u)
                inner[inner_class_node.id] = inner_class_node
    # register inner-class files not already among the declared outputs
    to_add = set(inner.keys()) - set([x.id for x in self.outputs])
    for x in to_add:
        self.outputs.append(inner[x])
    self.cached = True # disable the cache here - inner classes are a problem
    return Task.Task.post_run(self)
cls.post_run = post_run_javac
def detect(conf):
    """Find javac/java/jar (honouring JAVA_HOME) and seed the java env vars."""
    # If JAVA_PATH is set, we prepend it to the path list
    java_path = conf.environ['PATH'].split(os.pathsep)
    v = conf.env
    if 'JAVA_HOME' in conf.environ:
        java_path = [os.path.join(conf.environ['JAVA_HOME'], 'bin')] + java_path
        conf.env['JAVA_HOME'] = [conf.environ['JAVA_HOME']]
    for x in 'javac java jar'.split():
        conf.find_program(x, var=x.upper(), path_list=java_path)
        conf.env[x.upper()] = conf.cmd_to_list(conf.env[x.upper()])
    v['JAVA_EXT'] = ['.java']
    if 'CLASSPATH' in conf.environ:
        v['CLASSPATH'] = conf.environ['CLASSPATH']
    if not v['JAR']: conf.fatal('jar is required for making java packages')
    if not v['JAVAC']: conf.fatal('javac is required for compiling java classes')
    v['JARCREATE'] = 'cf' # can use cvf
@conf
def check_java_class(self, classname, with_classpath=None):
    """Check if the specified java class is installed"""
    # compiles class_check_source into a scratch dir, then runs it with the
    # class name as argument; the exec return code is 0 on success
    import shutil
    javatestdir = '.waf-javatest'
    classpath = javatestdir
    if self.env['CLASSPATH']:
        classpath += os.pathsep + self.env['CLASSPATH']
    if isinstance(with_classpath, str):
        classpath += os.pathsep + with_classpath
    shutil.rmtree(javatestdir, True)
    os.mkdir(javatestdir)
    java_file = open(os.path.join(javatestdir, 'Test.java'), 'w')
    java_file.write(class_check_source)
    java_file.close()
    # Compile the source
    Utils.exec_command(self.env['JAVAC'] + [os.path.join(javatestdir, 'Test.java')], shell=False)
    # Try to run the app
    cmd = self.env['JAVA'] + ['-cp', classpath, 'Test', classname]
    self.log.write("%s\n" % str(cmd))
    found = Utils.exec_command(cmd, shell=False, log=self.log)
    # 'found' is the process exit status: 0 means the class was loadable
    self.check_message('Java class %s' % classname, "", not found)
    shutil.rmtree(javatestdir, True)
    return found
@conf
def check_jni_headers(conf):
    """
    Check for jni headers and libraries
    On success the environment variable xxx_JAVA is added for uselib
    """
    if not conf.env.CC_NAME and not conf.env.CXX_NAME:
        conf.fatal('load a compiler first (gcc, g++, ..)')
    if not conf.env.JAVA_HOME:
        conf.fatal('set JAVA_HOME in the system environment')
    # jni requires the jvm
    javaHome = conf.env['JAVA_HOME'][0]
    b = Build.BuildContext()
    b.load_dirs(conf.srcdir, conf.blddir)
    dir = b.root.find_dir(conf.env.JAVA_HOME[0] + '/include')
    f = dir.ant_glob('**/(jni|jni_md).h', flat=False)
    incDirs = [x.parent.abspath() for x in f]
    dir = b.root.find_dir(conf.env.JAVA_HOME[0])
    f = dir.ant_glob('**/*jvm.(so|dll)', flat=False)
    libDirs = [x.parent.abspath() for x in f] or [javaHome]
    # try each candidate lib dir until one links; for-else hits fatal()
    for i, d in enumerate(libDirs):
        if conf.check(header_name='jni.h', define_name='HAVE_JNI_H', lib='jvm',
                libpath=d, includes=incDirs, uselib_store='JAVA', uselib='JAVA'):
            break
    else:
        conf.fatal('could not find lib jvm in %r (see config.log)' % libDirs)
ntdb-1.0/buildtools/wafadmin/Tools/kde4.py 0000664 0000000 0000000 00000004443 12241515307 0020555 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
import os, sys, re
import Options, TaskGen, Task, Utils
from TaskGen import taskgen, feature, after
class msgfmt_taskgen(TaskGen.task_gen):
    def __init__(self, *k, **kw):
        TaskGen.task_gen.__init__(self, *k, **kw)
@feature('msgfmt')
def init_msgfmt(self):
    #langs = '' # for example "foo/fr foo/br"
    self.default_install_path = '${KDE4_LOCALE_INSTALL_DIR}'
@feature('msgfmt')
@after('init_msgfmt')
def apply_msgfmt(self):
    """Create one msgfmt task per language in self.langs and arrange the
    resulting .mo files for installation under <lang>/LC_MESSAGES."""
    for lang in self.to_list(self.langs):
        node = self.path.find_resource(lang+'.po')
        task = self.create_task('msgfmt', node, node.change_ext('.mo'))
        if not self.bld.is_install: continue
        # last path component is the language code
        langname = lang.split('/')
        langname = langname[-1]
        task.install_path = self.install_path + os.sep + langname + os.sep + 'LC_MESSAGES'
        task.filename = getattr(self, 'appname', 'set_your_appname') + '.mo'
        task.chmod = self.chmod
def detect(conf):
    """Configure KDE4 support: locate kde4-config, import the variables from
    KDELibsDependencies.cmake into conf.env, and register the msgfmt task."""
    kdeconfig = conf.find_program('kde4-config')
    if not kdeconfig:
        conf.fatal('we need kde4-config')
    prefix = Utils.cmd_output('%s --prefix' % kdeconfig, silent=True).strip()
    file = '%s/share/apps/cmake/modules/KDELibsDependencies.cmake' % prefix
    try: os.stat(file)
    except OSError:
        file = '%s/share/kde4/apps/cmake/modules/KDELibsDependencies.cmake' % prefix
        try: os.stat(file)
        except OSError: conf.fatal('could not open %s' % file)
    try:
        txt = Utils.readf(file)
    except (OSError, IOError):
        conf.fatal('could not read %s' % file)
    # join continuation lines and strip cmake comments
    txt = txt.replace('\\\n', '\n')
    fu = re.compile('#(.*)\n')
    txt = fu.sub('', txt)
    # bug fix: raw string — '\s' and '\(' are regex escapes, which are invalid
    # escape sequences in a plain string (SyntaxWarning on modern Python)
    setregexp = re.compile(r'([sS][eE][tT]\s*\()\s*([^\s]+)\s+\"([^"]+)\"\)')
    found = setregexp.findall(txt)
    for (_, key, val) in found:
        #print key, val
        conf.env[key] = val
    # well well, i could just write an interpreter for cmake files
    conf.env['LIB_KDECORE']='kdecore'
    conf.env['LIB_KDEUI'] ='kdeui'
    conf.env['LIB_KIO'] ='kio'
    conf.env['LIB_KHTML'] ='khtml'
    conf.env['LIB_KPARTS'] ='kparts'
    conf.env['LIBPATH_KDECORE'] = conf.env['KDE4_LIB_INSTALL_DIR']
    conf.env['CPPPATH_KDECORE'] = conf.env['KDE4_INCLUDE_INSTALL_DIR']
    conf.env.append_value('CPPPATH_KDECORE', conf.env['KDE4_INCLUDE_INSTALL_DIR']+"/KDE")
    conf.env['MSGFMT'] = conf.find_program('msgfmt')
Task.simple_task_type('msgfmt', '${MSGFMT} ${SRC} -o ${TGT}', color='BLUE', shell=False)
ntdb-1.0/buildtools/wafadmin/Tools/libtool.py 0000664 0000000 0000000 00000022302 12241515307 0021364 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Matthias Jahn, 2008, jahn matthias ath freenet punto de
# Thomas Nagy, 2008 (ita)
import sys, re, os, optparse
import TaskGen, Task, Utils, preproc
from Logs import error, debug, warn
from TaskGen import taskgen, after, before, feature
REVISION="0.1.3"
"""
if you want to use the code here, you must use something like this:
obj = obj.create(...)
obj.features.append("libtool")
obj.vnum = "1.2.3" # optional, but versioned libraries are common
"""
# fake libtool files
fakelibtool_vardeps = ['CXX', 'PREFIX']
def fakelibtool_build(task):
    # Writes a .la file, used by libtool.
    # Returns 0 on success so it can be used directly as a waf Task function.
    env = task.env
    sname = task.inputs[0].name
    # bug fix: use a context manager so the output file is closed (and
    # flushed) even if one of the writes raises
    with open(task.outputs[0].abspath(env), 'w') as dest:
        fu = dest.write
        fu("# Generated by ltmain.sh - GNU libtool 1.5.18 - (pwn3d by BKsys II code name WAF)\n")
        if env['vnum']:
            # versioned library: libfoo.so.X.Y.Z / libfoo.so.X / libfoo.so
            nums = env['vnum'].split('.')
            libname = task.inputs[0].name
            name3 = libname+'.'+env['vnum']
            name2 = libname+'.'+nums[0]
            name1 = libname
            fu("dlname='%s'\n" % name2)
            strn = " ".join([name3, name2, name1])
            fu("library_names='%s'\n" % (strn) )
        else:
            fu("dlname='%s'\n" % sname)
            fu("library_names='%s %s %s'\n" % (sname, sname, sname) )
        fu("old_library=''\n")
        vars = ' '.join(env['libtoolvars']+env['LINKFLAGS'])
        fu("dependency_libs='%s'\n" % vars)
        fu("current=0\n")
        fu("age=0\nrevision=0\ninstalled=yes\nshouldnotlink=no\n")
        fu("dlopen=''\ndlpreopen=''\n")
        fu("libdir='%s/lib'\n" % env['PREFIX'])
    return 0
def read_la_file(path):
    """Parse a libtool .la file into a dict mapping key -> value.

    Only lines of the exact form key='value' are kept; comments, blank
    lines and anything else are silently skipped.
    """
    sp = re.compile(r'^([^=]+)=\'(.*)\'$')
    dc={}
    # bug fix: 'with' guarantees the handle is closed even if parsing raises
    with open(path, "r") as file:
        for line in file.readlines():
            try:
                #print sp.split(line.strip())
                _, left, right, _ = sp.split(line.strip())
                dc[left]=right
            except ValueError:
                # line does not match key='value' -> ignore it
                pass
    return dc
@feature("libtool")
@after('apply_link')
def apply_link_libtool(self):
    """After linking, emit a fake .la alongside the library and schedule
    its installation into ${PREFIX}/lib."""
    if self.type != 'program':
        linktask = self.link_task
        self.latask = self.create_task('fakelibtool', linktask.outputs, linktask.outputs[0].change_ext('.la'))
        if self.bld.is_install:
            self.bld.install_files('${PREFIX}/lib', linktask.outputs[0], self.env)
@feature("libtool")
@before('apply_core')
def apply_libtool(self):
    """Expand -L/-l LINKFLAGS through the referenced .la files, recursively
    folding their dependency_libs back into LINKFLAGS."""
    self.env['vnum']=self.vnum
    paths=[]
    libs=[]
    libtool_files=[]
    libtool_vars=[]
    # split existing LINKFLAGS into search paths and library names
    for l in self.env['LINKFLAGS']:
        if l[:2]=='-L':
            paths.append(l[2:])
        elif l[:2]=='-l':
            libs.append(l[2:])
    for l in libs:
        for p in paths:
            # NOTE(review): only the first path is ever tried (unconditional
            # 'break' below) — preserved as-is, confirm against callers
            dict = read_la_file(p+'/lib'+l+'.la')
            linkflags2 = dict.get('dependency_libs', '')
            for v in linkflags2.split():
                if v.endswith('.la'):
                    # defer nested .la files for the transitive pass below
                    libtool_files.append(v)
                    libtool_vars.append(v)
                    continue
                self.env.append_unique('LINKFLAGS', v)
            break
    self.env['libtoolvars']=libtool_vars
    # transitive closure over .la references
    while libtool_files:
        file = libtool_files.pop()
        dict = read_la_file(file)
        for v in dict['dependency_libs'].split():
            if v[-3:] == '.la':
                libtool_files.append(v)
                continue
            self.env.append_unique('LINKFLAGS', v)
Task.task_type_from_func('fakelibtool', vars=fakelibtool_vardeps, func=fakelibtool_build, color='BLUE', after="cc_link cxx_link static_link")
class libtool_la_file:
    """Parsed representation of a single libtool .la description file.

    Attribute names mirror the keys found in the file; values are coerced:
    'yes'/'no' -> bool, integers -> int, anything else is unquoted.
    Raises ValueError when the file does not exist.
    """
    def __init__ (self, la_filename):
        self.__la_filename = la_filename
        #remove path and .la suffix
        self.linkname = str(os.path.split(la_filename)[-1])[:-3]
        if self.linkname.startswith("lib"):
            self.linkname = self.linkname[3:]
        # The name that we can dlopen(3).
        self.dlname = None
        # Names of this library
        self.library_names = None
        # The name of the static archive.
        self.old_library = None
        # Libraries that this one depends upon.
        self.dependency_libs = None
        # Version information for libIlmImf.
        self.current = None
        self.age = None
        self.revision = None
        # Is this an already installed library?
        self.installed = None
        # Should we warn about portability when linking against -modules?
        self.shouldnotlink = None
        # Files to dlopen/dlpreopen
        self.dlopen = None
        self.dlpreopen = None
        # Directory that this library needs to be installed in:
        self.libdir = '/usr/lib'
        if not self.__parse():
            raise ValueError("file %s not found!!" %(la_filename))
    def __parse(self):
        "Retrieve the variables from a file"
        if not os.path.isfile(self.__la_filename): return 0
        # bug fix: 'with' closes the handle even if a malformed line raises
        with open(self.__la_filename, 'r') as la_file:
            for line in la_file:
                ln = line.strip()
                if not ln: continue
                if ln[0]=='#': continue
                (key, value) = str(ln).split('=', 1)
                key = key.strip()
                value = value.strip()
                # booleans and integers are stored unquoted by libtool
                if value == "no": value = False
                elif value == "yes": value = True
                else:
                    try: value = int(value)
                    except ValueError: value = value.strip("'")
                setattr(self, key, value)
        return 1
    def get_libs(self):
        """return linkflags for this lib"""
        libs = []
        if self.dependency_libs:
            libs = str(self.dependency_libs).strip().split()
        # (removed an unreachable 'libs == None' branch: libs is always a list)
        # add la lib and libdir
        libs.insert(0, "-l%s" % self.linkname.strip())
        libs.insert(0, "-L%s" % self.libdir.strip())
        return libs
    def __str__(self):
        return '''\
dlname = "%(dlname)s"
library_names = "%(library_names)s"
old_library = "%(old_library)s"
dependency_libs = "%(dependency_libs)s"
version = %(current)s.%(age)s.%(revision)s
installed = "%(installed)s"
shouldnotlink = "%(shouldnotlink)s"
dlopen = "%(dlopen)s"
dlpreopen = "%(dlpreopen)s"
libdir = "%(libdir)s"''' % self.__dict__
class libtool_config:
    """Resolve a .la file (and, transitively, the .la files it references)
    into a flat, duplicate-free set of linker flags."""
    def __init__ (self, la_filename):
        self.__libtool_la_file = libtool_la_file(la_filename)
        tmp = self.__libtool_la_file
        # version triple [current, age, revision] used for comparisons
        self.__version = [int(tmp.current), int(tmp.age), int(tmp.revision)]
        self.__sub_la_files = []
        self.__sub_la_files.append(la_filename)
        self.__libs = None
    def __cmp__(self, other):
        """make it compareable with X.Y.Z versions (Y and Z are optional)"""
        # NOTE(review): __cmp__ and the cmp() builtin are Python-2-only; under
        # Python 3 this class would need rich comparison methods — confirm the
        # supported interpreter before relying on <= / >= here
        if not other:
            return 1
        othervers = [int(s) for s in str(other).split(".")]
        selfvers = self.__version
        return cmp(selfvers, othervers)
    def __str__(self):
        return "\n".join([
            str(self.__libtool_la_file),
            ' '.join(self.__libtool_la_file.get_libs()),
            '* New getlibs:',
            ' '.join(self.get_libs())
            ])
    def __get_la_libs(self, la_filename):
        # helper: the flags contributed by one referenced .la file
        return libtool_la_file(la_filename).get_libs()
    def get_libs(self):
        """return the complete uniqe linkflags that do not
        contain .la files anymore"""
        libs_list = list(self.__libtool_la_file.get_libs())
        libs_map = {}
        while len(libs_list) > 0:
            entry = libs_list.pop(0)
            if entry:
                if str(entry).endswith(".la"):
                    ## prevents duplicate .la checks
                    if entry not in self.__sub_la_files:
                        self.__sub_la_files.append(entry)
                        libs_list.extend(self.__get_la_libs(entry))
                else:
                    libs_map[entry]=1
        self.__libs = libs_map.keys()
        return self.__libs
    def get_libs_only_L(self):
        if not self.__libs: self.get_libs()
        libs = self.__libs
        libs = [s for s in libs if str(s).startswith('-L')]
        return libs
    def get_libs_only_l(self):
        if not self.__libs: self.get_libs()
        libs = self.__libs
        libs = [s for s in libs if str(s).startswith('-l')]
        return libs
    def get_libs_only_other(self):
        if not self.__libs: self.get_libs()
        libs = self.__libs
        libs = [s for s in libs if not(str(s).startswith('-L')or str(s).startswith('-l'))]
        return libs
def useCmdLine():
    """parse cmdline args and control build"""
    usage = '''Usage: %prog [options] PathToFile.la
example: %prog --atleast-version=2.0.0 /usr/lib/libIlmImf.la
nor: %prog --libs /usr/lib/libamarok.la'''
    parser = optparse.OptionParser(usage)
    a = parser.add_option
    a("--version", dest = "versionNumber",
        action = "store_true", default = False,
        help = "output version of libtool-config"
        )
    a("--debug", dest = "debug",
        action = "store_true", default = False,
        help = "enable debug"
        )
    a("--libs", dest = "libs",
        action = "store_true", default = False,
        help = "output all linker flags"
        )
    a("--libs-only-l", dest = "libs_only_l",
        action = "store_true", default = False,
        help = "output -l flags"
        )
    a("--libs-only-L", dest = "libs_only_L",
        action = "store_true", default = False,
        help = "output -L flags"
        )
    a("--libs-only-other", dest = "libs_only_other",
        action = "store_true", default = False,
        help = "output other libs (e.g. -pthread)"
        )
    a("--atleast-version", dest = "atleast_version",
        default=None,
        help = "return 0 if the module is at least version ATLEAST_VERSION"
        )
    a("--exact-version", dest = "exact_version",
        default=None,
        help = "return 0 if the module is exactly version EXACT_VERSION"
        )
    a("--max-version", dest = "max_version",
        default=None,
        help = "return 0 if the module is at no newer than version MAX_VERSION"
        )
    (options, args) = parser.parse_args()
    if len(args) != 1 and not options.versionNumber:
        parser.error("incorrect number of arguments")
    if options.versionNumber:
        print("libtool-config version %s" % REVISION)
        return 0
    ltf = libtool_config(args[0])
    if options.debug:
        print(ltf)
    # the version checks exit(1) on failure, return 0 on success
    if options.atleast_version:
        if ltf >= options.atleast_version: return 0
        sys.exit(1)
    if options.exact_version:
        if ltf == options.exact_version: return 0
        sys.exit(1)
    if options.max_version:
        if ltf <= options.max_version: return 0
        sys.exit(1)
    def p(x):
        # print a flag list as a single space-separated line
        print(" ".join(x))
    if options.libs: p(ltf.get_libs())
    elif options.libs_only_l: p(ltf.get_libs_only_l())
    elif options.libs_only_L: p(ltf.get_libs_only_L())
    elif options.libs_only_other: p(ltf.get_libs_only_other())
    return 0
if __name__ == '__main__':
useCmdLine()
ntdb-1.0/buildtools/wafadmin/Tools/lua.py 0000664 0000000 0000000 00000000751 12241515307 0020505 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Sebastian Schlingmann, 2008
# Thomas Nagy, 2008 (ita)
import TaskGen
from TaskGen import taskgen, feature
from Constants import *
# register a compile chain: every *.lua source becomes a *.luac bytecode file
# produced by the luac compiler (found by detect() below, stored in env LUAC)
TaskGen.declare_chain(
    name = 'luac',
    rule = '${LUAC} -s -o ${TGT} ${SRC}',
    ext_in = '.lua',
    ext_out = '.luac',
    reentrant = False,
    install = 'LUADIR', # env variable
)
@feature('lua')
def init_lua(self):
    # install compiled lua files with the executable permission bits
    # (O755 comes from Constants, imported above)
    self.default_chmod = O755
def detect(conf):
    # configuration step: abort unless the 'luac' program exists; its path
    # is stored in the LUAC env variable used by the chain rule above
    conf.find_program('luac', var='LUAC', mandatory = True)
ntdb-1.0/buildtools/wafadmin/Tools/misc.py 0000664 0000000 0000000 00000027665 12241515307 0020674 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
"""
Custom objects:
- execute a function everytime
- copy a file somewhere else
"""
import shutil, re, os
import TaskGen, Node, Task, Utils, Build, Constants
from TaskGen import feature, taskgen, after, before
from Logs import debug
def copy_func(tsk):
    """Copy the task's single input file to its single output location.

    Returns 0 on success and 1 when the copy fails; when tsk.chmod is
    set, it is applied to the destination file after the copy.
    """
    env = tsk.env
    src = tsk.inputs[0].abspath(env)
    dst = tsk.outputs[0].abspath(env)
    try:
        shutil.copy2(src, dst)
    except (OSError, IOError):
        return 1
    if tsk.chmod:
        os.chmod(dst, tsk.chmod)
    return 0
def action_process_file_func(tsk):
    """Run the callable attached to the task (tsk.fun) and return its result."""
    if not tsk.fun:
        raise Utils.WafError('task must have a function attached to it for copy_func to work!')
    return tsk.fun(tsk)
class cmd_taskgen(TaskGen.task_gen):
    """Task generator backing the 'cmd' feature; adds nothing beyond the base class."""
    def __init__(self, *k, **kw):
        super(cmd_taskgen, self).__init__(*k, **kw)
@feature('cmd')
def apply_cmd(self):
    """Create a bare TaskBase that simply calls self.fun on every build."""
    if not self.fun:
        raise Utils.WafError('cmdobj needs a function!')
    task = Task.TaskBase()
    task.fun = self.fun
    task.env = self.env
    task.install_path = self.install_path
    self.tasks.append(task)
class copy_taskgen(TaskGen.task_gen):
    """By default, make a file copy, if fun is provided, fun will make the copy (or call a compiler, etc)"""
    def __init__(self, *k, **kw):
        super(copy_taskgen, self).__init__(*k, **kw)
@feature('copy')
@before('apply_core')
def apply_copy(self):
    """Set up one 'copy' task per source file for the 'copy' feature."""
    Utils.def_attrs(self, fun=copy_func)
    self.default_install_path = 0
    sources = self.to_list(self.source)
    self.meths.remove('apply_core')
    for filename in sources:
        node = self.path.find_resource(filename)
        if not node:
            raise Utils.WafError('cannot find input file %s for processing' % filename)
        # an explicit target name only makes sense for a single source file
        target = self.target
        if not target or len(sources) > 1:
            target = node.name
        # TODO the file path may be incorrect
        newnode = self.path.find_or_declare(target)
        task = self.create_task('copy', node, newnode)
        task.fun = self.fun
        task.chmod = self.chmod
        task.install_path = self.install_path
        if not task.env:
            task.debug()
            raise Utils.WafError('task without an environment')
def subst_func(tsk):
    """Substitute @VAR@ placeholders in a .in file.

    Values come from tsk.dict when it is non-empty; otherwise each
    placeholder is looked up in the task environment (exact name first,
    then upper-cased). Literal '%' characters in the input survive the
    '%'-formatting pass. Applies tsk.chmod to the output when set.
    """
    # bugfix/idiom: raw string — '\w' in a plain string is an invalid
    # escape sequence in modern Python
    m4_re = re.compile(r'@(\w+)@', re.M)
    env = tsk.env
    infile = tsk.inputs[0].abspath(env)
    outfile = tsk.outputs[0].abspath(env)
    code = Utils.readf(infile)
    # replace all % by %% to prevent errors by % signs in the input file while string formatting
    code = code.replace('%', '%%')
    s = m4_re.sub(r'%(\1)s', code)
    di = tsk.dict or {}
    if not di:
        # no explicit dict: pull each placeholder's value from the env
        for name in m4_re.findall(code):
            di[name] = env.get_flat(name) or env.get_flat(name.upper())
    # bugfix: close the handle even if the formatting raises (and avoid
    # shadowing the 'file' builtin); try/finally keeps old-Python compat
    out = open(outfile, 'w')
    try:
        out.write(s % di)
    finally:
        out.close()
    if tsk.chmod:
        os.chmod(outfile, tsk.chmod)
class subst_taskgen(TaskGen.task_gen):
    """Task generator backing the 'subst' feature; adds nothing beyond the base class."""
    def __init__(self, *k, **kw):
        super(subst_taskgen, self).__init__(*k, **kw)
@feature('subst')
@before('apply_core')
def apply_subst(self):
    # Create one 'copy' task per source file that runs subst_func on it.
    # The substitution dict is hashed into DICT_HASH so that changing the
    # dict values triggers a rebuild (dep_vars below).
    Utils.def_attrs(self, fun=subst_func)
    self.default_install_path = 0
    lst = self.to_list(self.source)
    self.meths.remove('apply_core')
    self.dict = getattr(self, 'dict', {})
    for filename in lst:
        node = self.path.find_resource(filename)
        if not node: raise Utils.WafError('cannot find input file %s for processing' % filename)
        if self.target:
            newnode = self.path.find_or_declare(self.target)
        else:
            # default target: same name with the trailing extension (.in) removed
            newnode = node.change_ext('')
        try:
            # self.dict may be an env-like object; flatten it to a plain dict
            self.dict = self.dict.get_merged_dict()
        except AttributeError:
            pass
        if self.dict and not self.env['DICT_HASH']:
            self.env = self.env.copy()
            keys = list(self.dict.keys())
            keys.sort()
            # NOTE: rebinding 'lst' here does not affect the ongoing
            # 'for filename in lst' iteration (it iterates the original list),
            # but the name reuse is confusing — these are the dict values
            lst = [self.dict[x] for x in keys]
            self.env['DICT_HASH'] = str(Utils.h_list(lst))
        tsk = self.create_task('copy', node, newnode)
        tsk.fun = self.fun
        tsk.dict = self.dict
        tsk.dep_vars = ['DICT_HASH']
        tsk.install_path = self.install_path
        tsk.chmod = self.chmod
        if not tsk.env:
            tsk.debug()
            raise Utils.WafError('task without an environment')
####################
## command-output ####
####################
class cmd_arg(object):
    """command-output arguments for representing files or folders"""
    def __init__(self, name, template='%s'):
        # logical name, resolved into a node by the subclass find_node()
        self.name = name
        # printf-style template applied to the resolved path
        self.template = template
        # filled in by find_node(); None until then
        self.node = None
class input_file(cmd_arg):
    """A command-output argument naming an existing input file."""
    def find_node(self, base_path):
        """Resolve self.name relative to base_path into self.node; raise if absent."""
        assert isinstance(base_path, Node.Node)
        self.node = base_path.find_resource(self.name)
        if self.node is None:
            # bugfix: the format string was missing its second %s, so the
            # raise itself died with "not all arguments converted"
            raise Utils.WafError("Input file %s not found in %s" % (self.name, base_path))
    def get_path(self, env, absolute):
        """Return the templated absolute or source-relative path of the node."""
        if absolute:
            return self.template % self.node.abspath(env)
        else:
            return self.template % self.node.srcpath(env)
class output_file(cmd_arg):
    """A command-output argument naming a file the command produces."""
    def find_node(self, base_path):
        """Declare self.name under base_path as self.node; raise if that fails."""
        assert isinstance(base_path, Node.Node)
        self.node = base_path.find_or_declare(self.name)
        if self.node is None:
            # bugfix: the format string was missing its second %s, so the
            # raise itself died with "not all arguments converted"
            raise Utils.WafError("Output file %s not found in %s" % (self.name, base_path))
    def get_path(self, env, absolute):
        """Return the templated absolute or build-relative path of the node."""
        if absolute:
            return self.template % self.node.abspath(env)
        else:
            return self.template % self.node.bldpath(env)
class cmd_dir_arg(cmd_arg):
    """A command-output argument naming a directory."""
    def find_node(self, base_path):
        """Resolve self.name as a directory under base_path; raise if absent."""
        assert isinstance(base_path, Node.Node)
        self.node = base_path.find_dir(self.name)
        if self.node is None:
            # bugfix: the format string was missing its second %s, so the
            # raise itself died with "not all arguments converted"
            raise Utils.WafError("Directory %s not found in %s" % (self.name, base_path))
class input_dir(cmd_dir_arg):
    """Directory argument rendered with its source-tree absolute path."""
    def get_path(self, dummy_env, dummy_absolute):
        # both parameters are ignored: input directories always use abspath()
        return self.template % self.node.abspath()
class output_dir(cmd_dir_arg):
    """Directory argument rendered with its variant-aware absolute path."""
    def get_path(self, env, dummy_absolute):
        # the 'absolute' flag is ignored; env selects the build variant
        return self.template % self.node.abspath(env)
class command_output(Task.Task):
    """Task that runs an external command with optional stdin/stdout/stderr
    redirection and an optional working directory."""
    color = "BLUE"
    def __init__(self, env, command, command_node, command_args, stdin, stdout, cwd, os_env, stderr):
        Task.Task.__init__(self, env, normal=1)
        assert isinstance(command, (str, Node.Node))
        self.command = command
        self.command_args = command_args
        self.stdin = stdin
        self.stdout = stdout
        self.cwd = cwd
        self.os_env = os_env
        self.stderr = stderr
        if command_node is not None: self.dep_nodes = [command_node]
        self.dep_vars = [] # additional environment variables to look
    def run(self):
        task = self
        #assert len(task.inputs) > 0
        def input_path(node, template):
            # relative paths are fine without a cwd; with a cwd they must be absolute
            if task.cwd is None:
                return template % node.bldpath(task.env)
            else:
                return template % node.abspath()
        def output_path(node, template):
            fun = node.abspath
            if task.cwd is None: fun = node.bldpath
            return template % fun(task.env)
        if isinstance(task.command, Node.Node):
            argv = [input_path(task.command, '%s')]
        else:
            argv = [task.command]
        for arg in task.command_args:
            if isinstance(arg, str):
                argv.append(arg)
            else:
                assert isinstance(arg, cmd_arg)
                argv.append(arg.get_path(task.env, (task.cwd is not None)))
        if task.stdin:
            stdin = open(input_path(task.stdin, '%s'))
        else:
            stdin = None
        if task.stdout:
            stdout = open(output_path(task.stdout, '%s'), "w")
        else:
            stdout = None
        if task.stderr:
            stderr = open(output_path(task.stderr, '%s'), "w")
        else:
            stderr = None
        if task.cwd is None:
            cwd = ('None (actually %r)' % os.getcwd())
        else:
            cwd = repr(task.cwd)
        debug("command-output: cwd=%s, stdin=%r, stdout=%r, argv=%r" %
              (cwd, stdin, stdout, argv))
        if task.os_env is None:
            os_env = os.environ
        else:
            os_env = task.os_env
        try:
            command = Utils.pproc.Popen(argv, stdin=stdin, stdout=stdout, stderr=stderr, cwd=task.cwd, env=os_env)
            return command.wait()
        finally:
            # bugfix: the redirection handles were never closed, leaking a
            # file descriptor per redirection until garbage collection
            for handle in (stdin, stdout, stderr):
                if handle:
                    handle.close()
class cmd_output_taskgen(TaskGen.task_gen):
    """Task generator backing the 'command-output' feature; adds nothing beyond the base class."""
    def __init__(self, *k, **kw):
        super(cmd_output_taskgen, self).__init__(*k, **kw)
@feature('command-output')
def init_cmd_output(self):
    """Give 'command-output' task generators their default attributes
    (only attributes not already set by the user are filled in)."""
    defaults = {
        'stdin': None,
        'stdout': None,
        'stderr': None,
        # the command to execute
        'command': None,
        # whether it is an external command; otherwise it is assumed
        # to be an executable binary or script that lives in the
        # source or build tree.
        'command_is_external': False,
        # extra parameters (argv) to pass to the command (excluding
        # the command itself)
        'argv': [],
        # dependencies to other objects -> this is probably not what you want (ita)
        # values must be 'task_gen' instances (not names!)
        'dependencies': [],
        # dependencies on env variable contents
        'dep_vars': [],
        # input files that are implicit, i.e. they are not
        # stdin, nor are they mentioned explicitly in argv
        'hidden_inputs': [],
        # output files that are implicit, i.e. they are not
        # stdout, nor are they mentioned explicitly in argv
        'hidden_outputs': [],
        # change the subprocess to this cwd (must use obj.input_dir() or output_dir() here)
        'cwd': None,
        # OS environment variables to pass to the subprocess
        # if None, use the default environment variables unchanged
        'os_env': None,
    }
    Utils.def_attrs(self, **defaults)
@feature('command-output')
@after('init_cmd_output')
def apply_cmd_output(self):
    """Build a command_output task from the attributes installed by
    init_cmd_output: resolve the command, the redirections, the hidden
    inputs/outputs, and wire up dependencies on other task generators."""
    if self.command is None:
        raise Utils.WafError("command-output missing command")
    if self.command_is_external:
        cmd = self.command
        cmd_node = None
    else:
        cmd_node = self.path.find_resource(self.command)
        assert cmd_node is not None, ('''Could not find command '%s' in source tree.
Hint: if this is an external command,
use command_is_external=True''') % (self.command,)
        cmd = cmd_node
    if self.cwd is None:
        cwd = None
    else:
        # bugfix: this branch referenced the undefined names 'cwd' and
        # 'CmdDirArg', and never bound 'cwd' for the command_output(...)
        # constructor call below (UnboundLocalError whenever cwd was set)
        assert isinstance(self.cwd, cmd_dir_arg)
        self.cwd.find_node(self.path)
        cwd = self.cwd
    args = []
    inputs = []
    outputs = []
    # cmd_arg arguments are resolved to nodes; files become task deps
    for arg in self.argv:
        if isinstance(arg, cmd_arg):
            arg.find_node(self.path)
            if isinstance(arg, input_file):
                inputs.append(arg.node)
            if isinstance(arg, output_file):
                outputs.append(arg.node)
    if self.stdout is None:
        stdout = None
    else:
        assert isinstance(self.stdout, str)
        stdout = self.path.find_or_declare(self.stdout)
        if stdout is None:
            raise Utils.WafError("File %s not found" % (self.stdout,))
        outputs.append(stdout)
    if self.stderr is None:
        stderr = None
    else:
        assert isinstance(self.stderr, str)
        stderr = self.path.find_or_declare(self.stderr)
        if stderr is None:
            raise Utils.WafError("File %s not found" % (self.stderr,))
        outputs.append(stderr)
    if self.stdin is None:
        stdin = None
    else:
        assert isinstance(self.stdin, str)
        stdin = self.path.find_resource(self.stdin)
        if stdin is None:
            raise Utils.WafError("File %s not found" % (self.stdin,))
        inputs.append(stdin)
    for hidden_input in self.to_list(self.hidden_inputs):
        node = self.path.find_resource(hidden_input)
        if node is None:
            raise Utils.WafError("File %s not found in dir %s" % (hidden_input, self.path))
        inputs.append(node)
    for hidden_output in self.to_list(self.hidden_outputs):
        node = self.path.find_or_declare(hidden_output)
        if node is None:
            raise Utils.WafError("File %s not found in dir %s" % (hidden_output, self.path))
        outputs.append(node)
    if not (inputs or getattr(self, 'no_inputs', None)):
        raise Utils.WafError('command-output objects must have at least one input file or give self.no_inputs')
    if not (outputs or getattr(self, 'no_outputs', None)):
        raise Utils.WafError('command-output objects must have at least one output file or give self.no_outputs')
    task = command_output(self.env, cmd, cmd_node, self.argv, stdin, stdout, cwd, self.os_env, stderr)
    Utils.copy_attrs(self, task, 'before after ext_in ext_out', only_if_set=True)
    self.tasks.append(task)
    task.inputs = inputs
    task.outputs = outputs
    task.dep_vars = self.to_list(self.dep_vars)
    for dep in self.dependencies:
        assert dep is not self
        dep.post()
        for dep_task in dep.tasks:
            task.set_run_after(dep_task)
    if not task.inputs:
        # the case for svnversion, always run, and update the output nodes
        task.runnable_status = type(Task.TaskBase.run)(runnable_status, task, task.__class__) # always run
        task.post_run = type(Task.TaskBase.run)(post_run, task, task.__class__)
    # TODO the case with no outputs?
def post_run(self):
    """Record a fresh signature for every output node so dependents rebuild.
    Bound onto no-input command_output tasks in apply_cmd_output."""
    sigs = self.generator.bld.node_sigs[self.env.variant()]
    for node in self.outputs:
        sigs[node.id] = Utils.h_file(node.abspath(self.env))
def runnable_status(self):
    # always report RUN_ME: bound onto tasks with no inputs (see
    # apply_cmd_output) which must execute on every build
    return Constants.RUN_ME
# register the 'copy' task type and expose the command-output task generator
Task.task_type_from_func('copy', vars=[], func=action_process_file_func)
TaskGen.task_gen.classes['command-output'] = cmd_output_taskgen
ntdb-1.0/buildtools/wafadmin/Tools/msvc.py 0000664 0000000 0000000 00000064005 12241515307 0020676 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2006 (dv)
# Tamas Pal, 2007 (folti)
# Nicolas Mercier, 2009
# Microsoft Visual C++/Intel C++ compiler support - beta, needs more testing
# usage:
#
# conf.env['MSVC_VERSIONS'] = ['msvc 9.0', 'msvc 8.0', 'wsdk 7.0', 'intel 11', 'PocketPC 9.0', 'Smartphone 8.0']
# conf.env['MSVC_TARGETS'] = ['x64']
# conf.check_tool('msvc')
# OR conf.check_tool('msvc', funs='no_autodetect')
# conf.check_lib_msvc('gdi32')
# conf.check_libs_msvc('kernel32 user32', mandatory=true)
# ...
# obj.uselib = 'KERNEL32 USER32 GDI32'
#
# platforms and targets will be tested in the order they appear;
# the first good configuration will be used
# supported platforms :
# ia64, x64, x86, x86_amd64, x86_ia64
# compilers supported :
# msvc => Visual Studio, versions 7.1 (2003), 8,0 (2005), 9.0 (2008)
# wsdk => Windows SDK, versions 6.0, 6.1, 7.0
# icl => Intel compiler, versions 9,10,11
# Smartphone => Compiler/SDK for Smartphone devices (armv4/v4i)
# PocketPC => Compiler/SDK for PocketPC devices (armv4/v4i)
import os, sys, re, string, optparse
import Utils, TaskGen, Runner, Configure, Task, Options
from Logs import debug, info, warn, error
from TaskGen import after, before, feature
from Configure import conftest, conf
import ccroot, cc, cxx, ar, winres
from libtool import read_la_file
try:
import _winreg
except:
import winreg as _winreg
# short alias for the subprocess wrapper shipped with waf
pproc = Utils.pproc
# importlibs provided by MSVC/Platform SDK. Do NOT search them....
g_msvc_systemlibs = """
aclui activeds ad1 adptif adsiid advapi32 asycfilt authz bhsupp bits bufferoverflowu cabinet
cap certadm certidl ciuuid clusapi comctl32 comdlg32 comsupp comsuppd comsuppw comsuppwd comsvcs
credui crypt32 cryptnet cryptui d3d8thk daouuid dbgeng dbghelp dciman32 ddao35 ddao35d
ddao35u ddao35ud delayimp dhcpcsvc dhcpsapi dlcapi dnsapi dsprop dsuiext dtchelp
faultrep fcachdll fci fdi framedyd framedyn gdi32 gdiplus glauxglu32 gpedit gpmuuid
gtrts32w gtrtst32hlink htmlhelp httpapi icm32 icmui imagehlp imm32 iphlpapi iprop
kernel32 ksguid ksproxy ksuser libcmt libcmtd libcpmt libcpmtd loadperf lz32 mapi
mapi32 mgmtapi minidump mmc mobsync mpr mprapi mqoa mqrt msacm32 mscms mscoree
msdasc msimg32 msrating mstask msvcmrt msvcurt msvcurtd mswsock msxml2 mtx mtxdm
netapi32 nmapinmsupp npptools ntdsapi ntdsbcli ntmsapi ntquery odbc32 odbcbcp
odbccp32 oldnames ole32 oleacc oleaut32 oledb oledlgolepro32 opends60 opengl32
osptk parser pdh penter pgobootrun pgort powrprof psapi ptrustm ptrustmd ptrustu
ptrustud qosname rasapi32 rasdlg rassapi resutils riched20 rpcndr rpcns4 rpcrt4 rtm
rtutils runtmchk scarddlg scrnsave scrnsavw secur32 sensapi setupapi sfc shell32
shfolder shlwapi sisbkup snmpapi sporder srclient sti strsafe svcguid tapi32 thunk32
traffic unicows url urlmon user32 userenv usp10 uuid uxtheme vcomp vcompd vdmdbg
version vfw32 wbemuuid webpost wiaguid wininet winmm winscard winspool winstrm
wintrust wldap32 wmiutils wow32 ws2_32 wsnmp32 wsock32 wst wtsapi32 xaswitch xolehlp
""".split()
# (target name, architecture) pairs, tried in the order they are listed
all_msvc_platforms = [ ('x64', 'amd64'), ('x86', 'x86'), ('ia64', 'ia64'), ('x86_amd64', 'amd64'), ('x86_ia64', 'ia64') ]
# Windows CE device targets (target name, compiler directory name)
all_wince_platforms = [ ('armv4', 'arm'), ('armv4i', 'arm'), ('mipsii', 'mips'), ('mipsii_fp', 'mips'), ('mipsiv', 'mips'), ('mipsiv_fp', 'mips'), ('sh4', 'sh'), ('x86', 'cex86') ]
# Intel compiler targets (target name, architecture)
all_icl_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')]
def setup_msvc(conf, versions):
    # Pick the first usable (compiler, version, target) combination.
    # 'versions' is a list of (version-string, [(target, (arch, (paths...)))])
    # as produced by the gather_*_versions functions below; user preferences
    # in MSVC_TARGETS / MSVC_VERSIONS narrow the search.
    platforms = Utils.to_list(conf.env['MSVC_TARGETS']) or [i for i,j in all_msvc_platforms+all_icl_platforms+all_wince_platforms]
    # no explicit preference: try detected versions, newest first
    desired_versions = conf.env['MSVC_VERSIONS'] or [v for v,_ in versions][::-1]
    versiondict = dict(versions)
    for version in desired_versions:
        try:
            targets = dict(versiondict [version])
            for target in platforms:
                try:
                    # p1/p2/p3 are the PATH / INCLUDE / LIB directory lists
                    arch,(p1,p2,p3) = targets[target]
                    compiler,revision = version.split()
                    return compiler,revision,p1,p2,p3
                except KeyError: continue
        except KeyError: continue
    conf.fatal('msvc: Impossible to find a valid architecture for building (in setup_msvc)')
@conf
def get_msvc_version(conf, compiler, version, target, vcvars):
    # Run the given vcvars batch file and harvest the PATH/INCLUDE/LIB it
    # sets, then sanity-check that the compiler actually runs.
    # Returns (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR) or calls conf.fatal().
    # NOTE(review): uses Python-2-only constructs below (dict.has_key,
    # 'except Exception, e'); left untouched to match the rest of the file.
    debug('msvc: get_msvc_version: %r %r %r', compiler, version, target)
    batfile = os.path.join(conf.blddir, 'waf-print-msvc.bat')
    f = open(batfile, 'w')
    f.write("""@echo off
set INCLUDE=
set LIB=
call "%s" %s
echo PATH=%%PATH%%
echo INCLUDE=%%INCLUDE%%
echo LIB=%%LIB%%
""" % (vcvars,target))
    f.close()
    sout = Utils.cmd_output(['cmd', '/E:on', '/V:on', '/C', batfile])
    lines = sout.splitlines()
    # the first line must identify a known environment-setup script
    for x in ('Setting environment', 'Setting SDK environment', 'Intel(R) C++ Compiler'):
        if lines[0].find(x) != -1:
            break
    else:
        debug('msvc: get_msvc_version: %r %r %r -> not found', compiler, version, target)
        conf.fatal('msvc: Impossible to find a valid architecture for building (in get_msvc_version)')
    for line in lines[1:]:
        if line.startswith('PATH='):
            path = line[5:]
            MSVC_PATH = path.split(';')
        elif line.startswith('INCLUDE='):
            MSVC_INCDIR = [i for i in line[8:].split(';') if i]
        elif line.startswith('LIB='):
            MSVC_LIBDIR = [i for i in line[4:].split(';') if i]
    # Check if the compiler is usable at all.
    # The detection may return 64-bit versions even on 32-bit systems, and these would fail to run.
    env = {}
    env.update(os.environ)
    env.update(PATH = path)
    compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
    cxx = conf.find_program(compiler_name, path_list=MSVC_PATH)
    # delete CL if exists. because it could contain parameters wich can change cl's behaviour rather catastrophically.
    if env.has_key('CL'):
        del(env['CL'])
    try:
        p = pproc.Popen([cxx, '/help'], env=env, stdout=pproc.PIPE, stderr=pproc.PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            raise Exception('return code: %r: %r' % (p.returncode, err))
    except Exception, e:
        debug('msvc: get_msvc_version: %r %r %r -> failure', compiler, version, target)
        debug(str(e))
        conf.fatal('msvc: cannot run the compiler (in get_msvc_version)')
    else:
        debug('msvc: get_msvc_version: %r %r %r -> OK', compiler, version, target)
        return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR)
@conf
def gather_wsdk_versions(conf, versions):
    # Enumerate installed Windows SDKs from the registry and append
    # ('wsdk <version>', targets) entries to 'versions'.
    version_pattern = re.compile('^v..?.?\...?.?')
    try:
        # 64-bit registry view first, then the plain view
        all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Microsoft SDKs\\Windows')
    except WindowsError:
        try:
            all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows')
        except WindowsError:
            return
    index = 0
    while 1:
        try:
            version = _winreg.EnumKey(all_versions, index)
        except WindowsError:
            # EnumKey raises when there are no more subkeys
            break
        index = index + 1
        if not version_pattern.match(version):
            continue
        try:
            msvc_version = _winreg.OpenKey(all_versions, version)
            path,type = _winreg.QueryValueEx(msvc_version,'InstallationFolder')
        except WindowsError:
            continue
        if os.path.isfile(os.path.join(path, 'bin', 'SetEnv.cmd')):
            targets = []
            for target,arch in all_msvc_platforms:
                try:
                    targets.append((target, (arch, conf.get_msvc_version('wsdk', version, '/'+target, os.path.join(path, 'bin', 'SetEnv.cmd')))))
                except Configure.ConfigurationError:
                    pass
            # strip the leading 'v' from the registry key name
            versions.append(('wsdk ' + version[1:], targets))
@conf
def gather_msvc_versions(conf, versions):
    # Enumerate Visual Studio (and Windows CE SDK) installations from the
    # registry and append ('msvc <version>', targets) and per-device CE
    # entries to 'versions'.
    # checks SmartPhones SDKs
    try:
        ce_sdk = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Windows CE Tools\\SDKs')
    except WindowsError:
        try:
            ce_sdk = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows CE Tools\\SDKs')
        except WindowsError:
            ce_sdk = ''
    if ce_sdk:
        supported_wince_platforms = []
        ce_index = 0
        while 1:
            try:
                sdk_device = _winreg.EnumKey(ce_sdk, ce_index)
            except WindowsError:
                break
            ce_index = ce_index + 1
            sdk = _winreg.OpenKey(ce_sdk, sdk_device)
            path,type = _winreg.QueryValueEx(sdk, 'SDKRootDir')
            path=str(path)
            path,device = os.path.split(path)
            if not device:
                # path ended with a separator; split again to get the device name
                path,device = os.path.split(path)
            for arch,compiler in all_wince_platforms:
                platforms = []
                if os.path.isdir(os.path.join(path, device, 'Lib', arch)):
                    platforms.append((arch, compiler, os.path.join(path, device, 'Include', arch), os.path.join(path, device, 'Lib', arch)))
                if platforms:
                    supported_wince_platforms.append((device, platforms))
    # checks MSVC
    version_pattern = re.compile('^..?\...?')
    for vcver,vcvar in [('VCExpress','exp'), ('VisualStudio','')]:
        try:
            all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\'+vcver)
        except WindowsError:
            try:
                all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\'+vcver)
            except WindowsError:
                continue
        index = 0
        while 1:
            try:
                version = _winreg.EnumKey(all_versions, index)
            except WindowsError:
                break
            index = index + 1
            if not version_pattern.match(version):
                continue
            try:
                msvc_version = _winreg.OpenKey(all_versions, version + "\\Setup\\VS")
                path,type = _winreg.QueryValueEx(msvc_version, 'ProductDir')
                path=str(path)
                targets = []
                if ce_sdk:
                    for device,platforms in supported_wince_platforms:
                        cetargets = []
                        for platform,compiler,include,lib in platforms:
                            winCEpath = os.path.join(path, 'VC', 'ce')
                            if os.path.isdir(winCEpath):
                                # CE builds reuse the x86 host environment for the common tools
                                common_bindirs,_1,_2 = conf.get_msvc_version('msvc', version, 'x86', os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat'))
                                if os.path.isdir(os.path.join(winCEpath, 'lib', platform)):
                                    bindirs = [os.path.join(winCEpath, 'bin', compiler), os.path.join(winCEpath, 'bin', 'x86_'+compiler)] + common_bindirs
                                    incdirs = [include, os.path.join(winCEpath, 'include'), os.path.join(winCEpath, 'atlmfc', 'include')]
                                    libdirs = [lib, os.path.join(winCEpath, 'lib', platform), os.path.join(winCEpath, 'atlmfc', 'lib', platform)]
                                    cetargets.append((platform, (platform, (bindirs,incdirs,libdirs))))
                        versions.append((device+' '+version, cetargets))
                if os.path.isfile(os.path.join(path, 'VC', 'vcvarsall.bat')):
                    for target,realtarget in all_msvc_platforms[::-1]:
                        try:
                            targets.append((target, (realtarget, conf.get_msvc_version('msvc', version, target, os.path.join(path, 'VC', 'vcvarsall.bat')))))
                        except:
                            pass
                elif os.path.isfile(os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat')):
                    # older layouts (VS 2003) only ship the x86 vsvars32.bat
                    try:
                        targets.append(('x86', ('x86', conf.get_msvc_version('msvc', version, 'x86', os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat')))))
                    except Configure.ConfigurationError:
                        pass
                versions.append(('msvc '+version, targets))
            except WindowsError:
                continue
@conf
def gather_icl_versions(conf, versions):
    # Enumerate installed Intel C++ compilers from the registry and append
    # ('intel <major>', targets) entries to 'versions'.
    version_pattern = re.compile('^...?.?\....?.?')
    try:
        all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\C++')
    except WindowsError:
        try:
            all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\C++')
        except WindowsError:
            return
    index = 0
    while 1:
        try:
            version = _winreg.EnumKey(all_versions, index)
        except WindowsError:
            break
        index = index + 1
        if not version_pattern.match(version):
            continue
        targets = []
        for target,arch in all_icl_platforms:
            try:
                icl_version = _winreg.OpenKey(all_versions, version+'\\'+target)
                path,type = _winreg.QueryValueEx(icl_version,'ProductDir')
                if os.path.isfile(os.path.join(path, 'bin', 'iclvars.bat')):
                    try:
                        targets.append((target, (arch, conf.get_msvc_version('intel', version, target, os.path.join(path, 'bin', 'iclvars.bat')))))
                    except Configure.ConfigurationError:
                        pass
            except WindowsError:
                continue
        # only the major version (first two characters) is reported
        major = version[0:2]
        versions.append(('intel ' + major, targets))
@conf
def get_msvc_versions(conf):
    """Detect all installed MSVC/WSDK/Intel toolchains, caching the result
    in conf.env.MSVC_INSTALLED_VERSIONS on the first call."""
    if not conf.env.MSVC_INSTALLED_VERSIONS:
        found = []
        for gather in (conf.gather_msvc_versions, conf.gather_wsdk_versions, conf.gather_icl_versions):
            gather(found)
        conf.env.MSVC_INSTALLED_VERSIONS = found
    return conf.env.MSVC_INSTALLED_VERSIONS
@conf
def print_all_msvc_detected(conf):
    """Log every detected compiler version together with its usable targets."""
    for version, targets in conf.env['MSVC_INSTALLED_VERSIONS']:
        info(version)
        for target, _ in targets:
            info("\t" + target)
def detect_msvc(conf):
    """Gather the installed toolchains and configure the first workable one."""
    return setup_msvc(conf, get_msvc_versions(conf))
@conf
def find_lt_names_msvc(self, libname, is_static=False):
    """
    Win32/MSVC specific code to glean out information from libtool la files.
    this function is not attached to the task_gen class
    Returns (libdir, libname, is_static) or (None, None, None).
    """
    lt_names=[
        'lib%s.la' % libname,
        '%s.la' % libname,
    ]
    for path in self.env['LIBPATH']:
        for la in lt_names:
            laf=os.path.join(path,la)
            dll=None
            if os.path.exists(laf):
                ltdict=read_la_file(laf)
                lt_libdir=None
                if ltdict.get('libdir', ''):
                    lt_libdir = ltdict['libdir']
                if not is_static and ltdict.get('library_names', ''):
                    dllnames=ltdict['library_names'].split()
                    dll=dllnames[0].lower()
                    dll=re.sub(r'\.dll$', '', dll)
                    return (lt_libdir, dll, False)
                elif ltdict.get('old_library', ''):
                    olib=ltdict['old_library']
                    if os.path.exists(os.path.join(path,olib)):
                        # file existence proven, use the search path itself
                        return (path, olib, True)
                    # bugfix: lt_libdir may still be None here; the old test
                    # (lt_libdir != '') let None through into os.path.join()
                    # and crashed with a TypeError
                    elif lt_libdir and os.path.exists(os.path.join(lt_libdir,olib)):
                        return (lt_libdir, olib, True)
                    else:
                        return (None, olib, True)
                else:
                    raise Utils.WafError('invalid libtool object file: %s' % laf)
    return (None, None, None)
@conf
def libname_msvc(self, libname, is_static=False, mandatory=False):
    """Map a generic library name onto the name the MSVC linker expects,
    consulting libtool .la files and probing LIBPATH for candidate files.
    Returns None for the math library 'm' (built into the MSVC CRT)."""
    lib = re.sub('\.lib$', '', libname.lower())
    if lib in g_msvc_systemlibs:
        return lib
    lib = re.sub('^lib', '', lib)
    if lib == 'm':
        return None
    (lt_path, lt_libname, lt_static) = self.find_lt_names_msvc(lib, is_static)
    if lt_path != None and lt_libname != None:
        if lt_static == True:
            # file existance check has been made by find_lt_names
            return os.path.join(lt_path, lt_libname)
    if lt_path != None:
        _libpaths = [lt_path] + self.env['LIBPATH']
    else:
        _libpaths = self.env['LIBPATH']
    static_libs = [pattern % lib for pattern in ('lib%ss.lib', 'lib%s.lib', '%ss.lib', '%s.lib')]
    dynamic_libs = [pattern % lib for pattern in (
        'lib%s.dll.lib', 'lib%s.dll.a', '%s.dll.lib', '%s.dll.a',
        'lib%s_d.lib', '%s_d.lib', '%s.lib')]
    libnames = static_libs
    if not is_static:
        libnames = dynamic_libs + static_libs
    for path in _libpaths:
        for candidate in libnames:
            if os.path.exists(os.path.join(path, candidate)):
                debug('msvc: lib found: %s', os.path.join(path, candidate))
                return re.sub('\.lib$', '', candidate)
    # if no lib can be found, just return the libname as msvc expects it
    if mandatory:
        self.fatal("The library %r could not be found" % libname)
    return re.sub('\.lib$', '', libname)
@conf
def check_lib_msvc(self, libname, is_static=False, uselib_store=None, mandatory=False):
    "This is the api to use"
    found = self.libname_msvc(libname, is_static, mandatory)
    store = uselib_store or libname.upper()
    # Note: ideally we should be able to place the lib in the right env var, either STATICLIB or LIB,
    # but we don't distinguish static libs from shared libs.
    # This is ok since msvc doesn't have any special linker flag to select static libs (no env['STATICLIB_MARKER'])
    if False and is_static: # disabled
        self.env['STATICLIB_' + store] = [found]
    else:
        self.env['LIB_' + store] = [found]
@conf
def check_libs_msvc(self, libnames, is_static=False, mandatory=False):
    """Run check_lib_msvc for every name in a list or space-separated string."""
    for name in Utils.to_list(libnames):
        self.check_lib_msvc(name, is_static, mandatory=mandatory)
@conftest
def no_autodetect(conf):
    """Run the standard configuration sequence minus the 'autodetect' step."""
    rules = detect.replace('autodetect', '')
    conf.eval_rules(rules)
# default sequence of configuration steps for this tool; no_autodetect()
# above runs the same list with 'autodetect' stripped out
detect = '''
autodetect
find_msvc
msvc_common_flags
cc_load_tools
cxx_load_tools
cc_add_flags
cxx_add_flags
link_add_flags
'''
@conftest
def autodetect(conf):
    """Detect the preferred MSVC installation and record its paths in the env."""
    env = conf.env
    compiler, version, path, includes, libdirs = detect_msvc(conf)
    for key, value in (('PATH', path), ('CPPPATH', includes),
                       ('LIBPATH', libdirs), ('MSVC_COMPILER', compiler)):
        env[key] = value
def _get_prog_names(conf, compiler):
if compiler=='intel':
compiler_name = 'ICL'
linker_name = 'XILINK'
lib_name = 'XILIB'
else:
# assumes CL.exe
compiler_name = 'CL'
linker_name = 'LINK'
lib_name = 'LIB'
return compiler_name, linker_name, lib_name
@conftest
def find_msvc(conf):
    # Locate the compiler, linker, archiver and manifest tool of the
    # detected MSVC installation and store them (plus path/include/lib
    # directories) in the configuration environment.
    # due to path format limitations, limit operation only to native Win32. Yeah it sucks.
    if sys.platform != 'win32':
        conf.fatal('MSVC module only works under native Win32 Python! cygwin is not supported yet')
    v = conf.env
    compiler, version, path, includes, libdirs = detect_msvc(conf)
    compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
    # manifest embedding exists since VS 2005 / WSDK 6 / ICL 11
    has_msvc_manifest = (compiler == 'msvc' and float(version) >= 8) or (compiler == 'wsdk' and float(version) >= 6) or (compiler == 'intel' and float(version) >= 11)
    # compiler
    cxx = None
    if v.CXX: cxx = v.CXX
    elif 'CXX' in conf.environ: cxx = conf.environ['CXX']
    if not cxx: cxx = conf.find_program(compiler_name, var='CXX', path_list=path, mandatory=True)
    cxx = conf.cmd_to_list(cxx)
    # before setting anything, check if the compiler is really msvc
    env = dict(conf.environ)
    env.update(PATH = ';'.join(path))
    if not Utils.cmd_output([cxx, '/nologo', '/?'], silent=True, env=env):
        conf.fatal('the msvc compiler could not be identified')
    link = v.LINK_CXX
    if not link:
        link = conf.find_program(linker_name, path_list=path, mandatory=True)
    ar = v.AR
    if not ar:
        ar = conf.find_program(lib_name, path_list=path, mandatory=True)
    # manifest tool. Not required for VS 2003 and below. Must have for VS 2005 and later
    mt = v.MT
    if has_msvc_manifest:
        mt = conf.find_program('MT', path_list=path, mandatory=True)
    # no more possibility of failure means the data state will be consistent
    # we may store the data safely now
    v.MSVC_MANIFEST = has_msvc_manifest
    v.PATH = path
    v.CPPPATH = includes
    v.LIBPATH = libdirs
    # c/c++ compiler
    v.CC = v.CXX = cxx
    v.CC_NAME = v.CXX_NAME = 'msvc'
    v.LINK = v.LINK_CXX = link
    if not v.LINK_CC:
        v.LINK_CC = v.LINK_CXX
    v.AR = ar
    v.MT = mt
    v.MTFLAGS = v.ARFLAGS = ['/NOLOGO']
    conf.check_tool('winres')
    if not conf.env.WINRC:
        warn('Resource compiler not found. Compiling resource file is disabled')
    # environment flags
    try: v.prepend_value('CPPPATH', conf.environ['INCLUDE'])
    except KeyError: pass
    try: v.prepend_value('LIBPATH', conf.environ['LIB'])
    except KeyError: pass
@conftest
def msvc_common_flags(conf):
    # Populate the configuration environment with the flag templates and
    # default options that the cc/cxx build rules expect for MSVC.
    v = conf.env
    v['CPPFLAGS'] = ['/W3', '/nologo']
    v['CCDEFINES_ST'] = '/D%s'
    v['CXXDEFINES_ST'] = '/D%s'
    # TODO just use _WIN32, which defined by the compiler itself!
    v['CCDEFINES'] = ['WIN32'] # avoid using this, any compiler predefines the _WIN32 marcro anyway
    v['CXXDEFINES'] = ['WIN32'] # avoid using this, any compiler predefines the _WIN32 marcro anyway
    v['_CCINCFLAGS'] = []
    v['_CCDEFFLAGS'] = []
    v['_CXXINCFLAGS'] = []
    v['_CXXDEFFLAGS'] = []
    v['CC_SRC_F'] = ''
    v['CC_TGT_F'] = ['/c', '/Fo']
    v['CXX_SRC_F'] = ''
    v['CXX_TGT_F'] = ['/c', '/Fo']
    v['CPPPATH_ST'] = '/I%s' # template for adding include paths
    v['AR_TGT_F'] = v['CCLNK_TGT_F'] = v['CXXLNK_TGT_F'] = '/OUT:'
    # Subsystem specific flags
    v['CPPFLAGS_CONSOLE'] = ['/SUBSYSTEM:CONSOLE']
    v['CPPFLAGS_NATIVE'] = ['/SUBSYSTEM:NATIVE']
    v['CPPFLAGS_POSIX'] = ['/SUBSYSTEM:POSIX']
    v['CPPFLAGS_WINDOWS'] = ['/SUBSYSTEM:WINDOWS']
    v['CPPFLAGS_WINDOWSCE'] = ['/SUBSYSTEM:WINDOWSCE']
    # CRT specific flags
    v['CPPFLAGS_CRT_MULTITHREADED'] = ['/MT']
    v['CPPFLAGS_CRT_MULTITHREADED_DLL'] = ['/MD']
    # TODO these are defined by the compiler itself!
    v['CPPDEFINES_CRT_MULTITHREADED'] = ['_MT'] # this is defined by the compiler itself!
    v['CPPDEFINES_CRT_MULTITHREADED_DLL'] = ['_MT', '_DLL'] # these are defined by the compiler itself!
    v['CPPFLAGS_CRT_MULTITHREADED_DBG'] = ['/MTd']
    v['CPPFLAGS_CRT_MULTITHREADED_DLL_DBG'] = ['/MDd']
    # TODO these are defined by the compiler itself!
    v['CPPDEFINES_CRT_MULTITHREADED_DBG'] = ['_DEBUG', '_MT'] # these are defined by the compiler itself!
    v['CPPDEFINES_CRT_MULTITHREADED_DLL_DBG'] = ['_DEBUG', '_MT', '_DLL'] # these are defined by the compiler itself!
    # compiler debug levels
    v['CCFLAGS'] = ['/TC']
    v['CCFLAGS_OPTIMIZED'] = ['/O2', '/DNDEBUG']
    v['CCFLAGS_RELEASE'] = ['/O2', '/DNDEBUG']
    v['CCFLAGS_DEBUG'] = ['/Od', '/RTC1', '/ZI']
    v['CCFLAGS_ULTRADEBUG'] = ['/Od', '/RTC1', '/ZI']
    v['CXXFLAGS'] = ['/TP', '/EHsc']
    v['CXXFLAGS_OPTIMIZED'] = ['/O2', '/DNDEBUG']
    v['CXXFLAGS_RELEASE'] = ['/O2', '/DNDEBUG']
    v['CXXFLAGS_DEBUG'] = ['/Od', '/RTC1', '/ZI']
    v['CXXFLAGS_ULTRADEBUG'] = ['/Od', '/RTC1', '/ZI']
    # linker
    v['LIB'] = []
    v['LIB_ST'] = '%s.lib' # template for adding libs
    v['LIBPATH_ST'] = '/LIBPATH:%s' # template for adding libpaths
    v['STATICLIB_ST'] = 'lib%s.lib' # Note: to be able to distinguish between a static lib and a dll import lib, it's a good pratice to name the static lib 'lib%s.lib' and the dll import lib '%s.lib'
    v['STATICLIBPATH_ST'] = '/LIBPATH:%s'
    v['LINKFLAGS'] = ['/NOLOGO']
    if v['MSVC_MANIFEST']:
        v.append_value('LINKFLAGS', '/MANIFEST')
    v['LINKFLAGS_DEBUG'] = ['/DEBUG']
    v['LINKFLAGS_ULTRADEBUG'] = ['/DEBUG']
    # shared library
    v['shlib_CCFLAGS'] = ['']
    v['shlib_CXXFLAGS'] = ['']
    v['shlib_LINKFLAGS']= ['/DLL']
    v['shlib_PATTERN'] = '%s.dll'
    v['implib_PATTERN'] = '%s.lib'
    v['IMPLIB_ST'] = '/IMPLIB:%s'
    # static library
    v['staticlib_LINKFLAGS'] = ['']
    v['staticlib_PATTERN'] = 'lib%s.lib' # Note: to be able to distinguish between a static lib and a dll import lib, it's a good pratice to name the static lib 'lib%s.lib' and the dll import lib '%s.lib'
    # program
    v['program_PATTERN'] = '%s.exe'
#######################################################################################################
##### conf above, build below
@after('apply_link')
@feature('cc', 'cxx')
def apply_flags_msvc(self):
	"""Add the /subsystem flag and, for debug builds, register the .pdb file
	as a link output so it is installed next to the binary (msvc only)."""
	if self.env.CC_NAME != 'msvc' or not self.link_task:
		return

	subsystem = getattr(self, 'subsystem', '')
	if subsystem:
		subsystem = '/subsystem:%s' % subsystem
		# static libs get the flag in ARFLAGS, everything else in LINKFLAGS
		flags = 'cstaticlib' in self.features and 'ARFLAGS' or 'LINKFLAGS'
		self.env.append_value(flags, subsystem)

	if getattr(self, 'link_task', None) and not 'cstaticlib' in self.features:
		for f in self.env.LINKFLAGS:
			d = f.lower()
			if d[1:] == 'debug':
				# a /DEBUG link produces a .pdb next to the output; track and install it
				pdbnode = self.link_task.outputs[0].change_ext('.pdb')
				pdbfile = pdbnode.bldpath(self.env)
				self.link_task.outputs.append(pdbnode)
				self.bld.install_files(self.install_path, [pdbnode], env=self.env)
				break
@feature('cprogram', 'cshlib', 'cstaticlib')
@after('apply_lib_vars')
@before('apply_obj_vars')
def apply_obj_vars_msvc(self):
	"""Translate the generic LIB/STATICLIB/LIBPATH variables into msvc-style
	linker flags (replaces the default 'apply_obj_vars' method)."""
	if self.env['CC_NAME'] != 'msvc':
		return

	try:
		# the default method would add unix-style -l/-L flags; drop it
		self.meths.remove('apply_obj_vars')
	except ValueError:
		pass

	libpaths = getattr(self, 'libpaths', [])
	if not libpaths: self.libpaths = libpaths

	env = self.env
	app = env.append_unique

	cpppath_st       = env['CPPPATH_ST']
	lib_st           = env['LIB_ST']
	staticlib_st     = env['STATICLIB_ST']
	libpath_st       = env['LIBPATH_ST']
	staticlibpath_st = env['STATICLIBPATH_ST']

	for i in env['LIBPATH']:
		app('LINKFLAGS', libpath_st % i)
		if not libpaths.count(i):
			libpaths.append(i)

	for i in env['LIBPATH']:
		app('LINKFLAGS', staticlibpath_st % i)
		if not libpaths.count(i):
			libpaths.append(i)

	# i doubt that anyone will make a fully static binary anyway
	if not env['FULLSTATIC']:
		if env['STATICLIB'] or env['LIB']:
			app('LINKFLAGS', env['SHLIB_MARKER']) # TODO does SHLIB_MARKER work?

	for i in env['STATICLIB']:
		app('LINKFLAGS', staticlib_st % i)

	for i in env['LIB']:
		app('LINKFLAGS', lib_st % i)
# split the manifest file processing from the link task, like for the rc processing
@feature('cprogram', 'cshlib')
@after('apply_link')
def apply_manifest(self):
	"""Special linker for MSVC with support for embedding manifests into DLL's
	and executables compiled by Visual Studio 2005 or probably later. Without
	the manifest file, the binaries are unusable.
	See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx"""
	if self.env.CC_NAME == 'msvc' and self.env.MSVC_MANIFEST:
		out_node = self.link_task.outputs[0]
		man_node = out_node.parent.find_or_declare(out_node.name + '.manifest')
		self.link_task.outputs.append(man_node)
		# flag read later by exec_command_msvc to run the mt tool after the link
		self.link_task.do_manifest = True
def exec_mf(self):
	"""Run the 'mt' tool to embed the .manifest file into the task's output
	binary; returns the mt exit status (0 if mt is unavailable)."""
	env = self.env
	mtool = env['MT']
	if not mtool:
		return 0

	self.do_manifest = False

	outfile = self.outputs[0].bldpath(env)

	manifest = None
	for out_node in self.outputs:
		if out_node.name.endswith('.manifest'):
			manifest = out_node.bldpath(env)
			break
	if manifest is None:
		# Should never get here.  If we do, it means the manifest file was
		# never added to the outputs list, thus we don't have a manifest file
		# to embed, so we just return.
		return 0

	# embedding mode. Different for EXE's and DLL's.
	# see: http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx
	mode = ''
	if 'cprogram' in self.generator.features:
		mode = '1'
	elif 'cshlib' in self.generator.features:
		mode = '2'

	debug('msvc: embedding manifest')
	#flags = ' '.join(env['MTFLAGS'] or [])

	lst = []
	lst.extend([env['MT']])
	lst.extend(Utils.to_list(env['MTFLAGS']))
	lst.extend(Utils.to_list("-manifest"))
	lst.extend(Utils.to_list(manifest))
	lst.extend(Utils.to_list("-outputresource:%s;%s" % (outfile, mode)))

	#cmd='%s %s -manifest "%s" -outputresource:"%s";#%s' % (mtool, flags,
	#	manifest, outfile, mode)
	lst = [lst]
	return self.exec_command(*lst)
########## stupid evil command modification: concatenate the tokens /Fx, /doc, and /x: with the next token
def exec_command_msvc(self, *k, **kw):
	"instead of quoting all the paths and keep using the shell, we can just join the options msvc is interested in"
	if self.env['CC_NAME'] == 'msvc':
		if isinstance(k[0], list):
			lst = []
			carry = ''
			for a in k[0]:
				# NOTE: 'and' binds tighter than 'or' - only 3-char /F? options
				# are carried, plus '/doc' and any token ending in ':'
				if len(a) == 3 and a.startswith('/F') or a == '/doc' or a[-1] == ':':
					carry = a
				else:
					lst.append(carry + a)
					carry = ''
			k = [lst]

		env = dict(os.environ)
		# msvc tools need PATH as a ';'-joined string
		env.update(PATH = ';'.join(self.env['PATH']))
		kw['env'] = env

	ret = self.generator.bld.exec_command(*k, **kw)
	if ret: return ret
	if getattr(self, 'do_manifest', None):
		# the link succeeded; now embed the manifest (set by apply_manifest)
		ret = exec_mf(self)
	return ret
# monkey-patch every compile/link task class so the msvc-specific command
# munging (and manifest embedding) is applied when the msvc compiler is used
for k in 'cc cxx winrc cc_link cxx_link static_link qxx'.split():
	cls = Task.TaskBase.classes.get(k, None)
	if cls:
		cls.exec_command = exec_command_msvc
ntdb-1.0/buildtools/wafadmin/Tools/nasm.py 0000664 0000000 0000000 00000002474 12241515307 0020666 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2008
"""
Nasm processing
"""
import os
import TaskGen, Task, Utils
from TaskGen import taskgen, before, extension
nasm_str = '${NASM} ${NASM_FLAGS} ${NASM_INCLUDES} ${SRC} -o ${TGT}'
EXT_NASM = ['.s', '.S', '.asm', '.ASM', '.spp', '.SPP']

@before('apply_link')
def apply_nasm_vars(self):
	"""Copy the per-target 'nasm_flags' and 'includes' attributes into the
	NASM_FLAGS / NASM_INCLUDES environment variables used by the command."""
	# flags
	if hasattr(self, 'nasm_flags'):
		for flag in self.to_list(self.nasm_flags):
			self.env.append_value('NASM_FLAGS', flag)

	# includes - well, if we suppose it works with c processing
	if hasattr(self, 'includes'):
		for inc in self.to_list(self.includes):
			node = self.path.find_dir(inc)
			if not node:
				raise Utils.WafError('cannot find the dir' + inc)
			# both the source and the build directory variants are added
			self.env.append_value('NASM_INCLUDES', '-I%s' % node.srcpath(self.env))
			self.env.append_value('NASM_INCLUDES', '-I%s' % node.bldpath(self.env))
@extension(EXT_NASM)
def nasm_file(self, node):
	"""Create a 'nasm' task for each assembly source file."""
	try: obj_ext = self.obj_ext
	except AttributeError: obj_ext = '_%d.o' % self.idx
	task = self.create_task('nasm', node, node.change_ext(obj_ext))
	self.compiled_tasks.append(task)
	# make sure the flag/include processing runs for this target
	self.meths.append('apply_nasm_vars')
# create our action here
Task.simple_task_type('nasm', nasm_str, color='BLUE', ext_out='.o', shell=False)

def detect(conf):
	"""Configuration: locate nasm (or yasm as a drop-in replacement)."""
	nasm = conf.find_program(['nasm', 'yasm'], var='NASM', mandatory=True)
ntdb-1.0/buildtools/wafadmin/Tools/ocaml.py 0000664 0000000 0000000 00000021605 12241515307 0021020 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
"ocaml support"
import os, re
import TaskGen, Utils, Task, Build
from Logs import error
from TaskGen import taskgen, feature, before, after, extension
EXT_MLL = ['.mll']
EXT_MLY = ['.mly']
EXT_MLI = ['.mli']
EXT_MLC = ['.c']
EXT_ML  = ['.ml']

open_re = re.compile('^\s*open\s+([a-zA-Z]+)(;;){0,1}$', re.M)
foo = re.compile(r"""(\(\*)|(\*\))|("(\\.|[^"\\])*"|'(\\.|[^'\\])*'|.[^()*"'\\]*)""", re.M)
def filter_comments(txt):
	"""Remove OCaml comments from txt, honouring arbitrary (* ... *) nesting.

	Text inside comments is dropped; the delimiters themselves are dropped too.
	"""
	depth = [0]  # current comment nesting level (list so the closure can mutate it)
	def _strip(match):
		if match.group(1):		# '(*' opens a (possibly nested) comment
			depth[0] += 1
			return ''
		if match.group(2):		# '*)' closes one level
			depth[0] -= 1
			return ''
		if depth[0]:			# plain text inside a comment: discard
			return ''
		return match.group(0)	# plain text outside comments: keep
	return foo.sub(_strip, txt)
def scan(self):
	"""Task scanner: parse the 'open Module' statements of the input .ml file
	and return (resolved dependency nodes, unresolved module names)."""
	node = self.inputs[0]
	code = filter_comments(node.read(self.env))

	global open_re
	names = []
	import_iterator = open_re.finditer(code)
	if import_iterator:
		for import_match in import_iterator:
			names.append(import_match.group(1))
	found_lst = []
	raw_lst = []
	for name in names:
		nd = None
		for x in self.incpaths:
			# module Foo usually lives in foo.ml, but try Foo.ml too
			nd = x.find_resource(name.lower()+'.ml')
			if not nd: nd = x.find_resource(name+'.ml')
			if nd:
				found_lst.append(nd)
				break
		else:
			raw_lst.append(name)

	return (found_lst, raw_lst)
# the build types that trigger native / bytecode compilation respectively
native_lst=['native', 'all', 'c_object']
bytecode_lst=['bytecode', 'all']

class ocaml_taskgen(TaskGen.task_gen):
	"""Task generator for the 'ocaml' feature (behaviour comes from the
	feature methods below, this subclass only provides the name)."""
	def __init__(self, *k, **kw):
		TaskGen.task_gen.__init__(self, *k, **kw)
@feature('ocaml')
def init_ml(self):
	"""Give the task generator the default attributes used by the other
	ocaml feature methods (task lists, environments, include paths...)."""
	Utils.def_attrs(self,
		type = 'all',
		incpaths_lst = [],
		bld_incpaths_lst = [],
		mlltasks = [],
		mlytasks = [],
		mlitasks = [],
		native_tasks = [],
		bytecode_tasks = [],
		linktasks = [],
		bytecode_env = None,
		native_env = None,
		compiled_tasks = [],
		includes = '',
		uselib = '',
		are_deps_set = 0)
@feature('ocaml')
@after('init_ml')
def init_envs_ml(self):
	"""Create the native and/or bytecode compilation environments according
	to self.type; libraries get the '-a' link flag."""
	self.islibrary = getattr(self, 'islibrary', False)

	global native_lst, bytecode_lst
	self.native_env = None
	if self.type in native_lst:
		self.native_env = self.env.copy()
		if self.islibrary: self.native_env['OCALINKFLAGS']   = '-a'

	self.bytecode_env = None
	if self.type in bytecode_lst:
		self.bytecode_env = self.env.copy()
		if self.islibrary: self.bytecode_env['OCALINKFLAGS'] = '-a'

	if self.type == 'c_object':
		# produce a .o usable from C code
		self.native_env.append_unique('OCALINKFLAGS_OPT', '-output-obj')
@feature('ocaml')
@before('apply_vars_ml')
@after('init_envs_ml')
def apply_incpaths_ml(self):
	"""Resolve self.includes (space-separated dirs) into nodes stored in
	incpaths_lst / bld_incpaths_lst; unknown dirs are reported, not fatal."""
	inc_lst = self.includes.split()
	lst = self.incpaths_lst
	for dir in inc_lst:
		node = self.path.find_dir(dir)
		if not node:
			error("node not found: " + str(dir))
			continue
		self.bld.rescan(node)
		if not node in lst: lst.append(node)
		self.bld_incpaths_lst.append(node)
	# now the nodes are added to self.incpaths_lst
@feature('ocaml')
@before('apply_core')
def apply_vars_ml(self):
	"""Fill OCAMLPATH with '-I dir' pairs and propagate the uselib variables
	(INCLUDES, OCAMLFLAGS, link flags) into both compile environments."""
	for i in self.incpaths_lst:
		if self.bytecode_env:
			app = self.bytecode_env.append_value
			app('OCAMLPATH', '-I')
			app('OCAMLPATH', i.srcpath(self.env))
			app('OCAMLPATH', '-I')
			app('OCAMLPATH', i.bldpath(self.env))

		if self.native_env:
			app = self.native_env.append_value
			app('OCAMLPATH', '-I')
			app('OCAMLPATH', i.bldpath(self.env))
			app('OCAMLPATH', '-I')
			app('OCAMLPATH', i.srcpath(self.env))

	varnames = ['INCLUDES', 'OCAMLFLAGS', 'OCALINKFLAGS', 'OCALINKFLAGS_OPT']
	for name in self.uselib.split():
		for vname in varnames:
			cnt = self.env[vname+'_'+name]
			if cnt:
				if self.bytecode_env: self.bytecode_env.append_value(vname, cnt)
				if self.native_env: self.native_env.append_value(vname, cnt)
@feature('ocaml')
@after('apply_core')
def apply_link_ml(self):
	"""Create the bytecode ('ocalink') and/or native ('ocalinkx') link tasks
	with the extension appropriate for the target type."""
	if self.bytecode_env:
		ext = self.islibrary and '.cma' or '.run'

		linktask = self.create_task('ocalink')
		linktask.bytecode = 1
		linktask.set_outputs(self.path.find_or_declare(self.target + ext))
		linktask.obj = self
		linktask.env = self.bytecode_env
		self.linktasks.append(linktask)

	if self.native_env:
		if self.type == 'c_object': ext = '.o'
		elif self.islibrary: ext = '.cmxa'
		else: ext = ''

		linktask = self.create_task('ocalinkx')
		linktask.set_outputs(self.path.find_or_declare(self.target + ext))
		linktask.obj = self
		linktask.env = self.native_env
		self.linktasks.append(linktask)

		# we produce a .o file to be used by gcc
		self.compiled_tasks.append(linktask)
@extension(EXT_MLL)
def mll_hook(self, node):
	"""ocamllex: compile .mll to .ml and feed the result back as a source."""
	mll_task = self.create_task('ocamllex', node, node.change_ext('.ml'), env=self.native_env)
	self.mlltasks.append(mll_task)

	self.allnodes.append(mll_task.outputs[0])
@extension(EXT_MLY)
def mly_hook(self, node):
	"""ocamlyacc: compile .mly to .ml/.mli; the .mli is compiled to .cmi too."""
	mly_task = self.create_task('ocamlyacc', node, [node.change_ext('.ml'), node.change_ext('.mli')], env=self.native_env)
	self.mlytasks.append(mly_task)
	self.allnodes.append(mly_task.outputs[0])

	task = self.create_task('ocamlcmi', mly_task.outputs[1], mly_task.outputs[1].change_ext('.cmi'), env=self.native_env)
@extension(EXT_MLI)
def mli_hook(self, node):
	"""Compile an interface file (.mli) to .cmi."""
	task = self.create_task('ocamlcmi', node, node.change_ext('.cmi'), env=self.native_env)
	self.mlitasks.append(task)
@extension(EXT_MLC)
def mlc_hook(self, node):
	"""Compile a C stub file with ocamlopt (task 'ocamlcc')."""
	task = self.create_task('ocamlcc', node, node.change_ext('.o'), env=self.native_env)
	self.compiled_tasks.append(task)
@extension(EXT_ML)
def ml_hook(self, node):
	"""Compile a .ml file to .cmx (native) and/or .cmo (bytecode)."""
	if self.native_env:
		task = self.create_task('ocamlx', node, node.change_ext('.cmx'), env=self.native_env)
		task.obj = self
		task.incpaths = self.bld_incpaths_lst
		self.native_tasks.append(task)

	if self.bytecode_env:
		task = self.create_task('ocaml', node, node.change_ext('.cmo'), env=self.bytecode_env)
		task.obj = self
		task.bytecode = 1
		task.incpaths = self.bld_incpaths_lst
		self.bytecode_tasks.append(task)
def compile_may_start(self):
	"""runnable_status replacement for ocaml compile tasks: on first call,
	scan the dependencies and order this task after the tasks it depends on."""
	if not getattr(self, 'flag_deps', ''):
		self.flag_deps = 1

		# the evil part is that we can only compute the dependencies after the
		# source files can be read (this means actually producing the source files)
		if getattr(self, 'bytecode', ''): alltasks = self.obj.bytecode_tasks
		else: alltasks = self.obj.native_tasks

		self.signature() # ensure that files are scanned - unfortunately
		tree = self.generator.bld
		env = self.env
		for node in self.inputs:
			lst = tree.node_deps[self.unique_id()]
			for depnode in lst:
				for t in alltasks:
					if t == self: continue
					if depnode in t.inputs:
						self.set_run_after(t)

		# TODO necessary to get the signature right - for now
		delattr(self, 'cache_sig')
		self.signature()

	return Task.Task.runnable_status(self)
# register the compile rules; runnable_status is replaced so the tasks are
# (re)ordered according to the scanned 'open' dependencies
b = Task.simple_task_type
cls = b('ocamlx', '${OCAMLOPT} ${OCAMLPATH} ${OCAMLFLAGS} ${INCLUDES} -c -o ${TGT} ${SRC}', color='GREEN', shell=False)
cls.runnable_status = compile_may_start
cls.scan = scan

b = Task.simple_task_type
cls = b('ocaml', '${OCAMLC} ${OCAMLPATH} ${OCAMLFLAGS} ${INCLUDES} -c -o ${TGT} ${SRC}', color='GREEN', shell=False)
cls.runnable_status = compile_may_start
cls.scan = scan

b('ocamlcmi', '${OCAMLC} ${OCAMLPATH} ${INCLUDES} -o ${TGT} -c ${SRC}', color='BLUE', before="ocaml ocamlcc ocamlx")
b('ocamlcc', 'cd ${TGT[0].bld_dir(env)} && ${OCAMLOPT} ${OCAMLFLAGS} ${OCAMLPATH} ${INCLUDES} -c ${SRC[0].abspath(env)}', color='GREEN')

b('ocamllex', '${OCAMLLEX} ${SRC} -o ${TGT}', color='BLUE', before="ocamlcmi ocaml ocamlcc")
b('ocamlyacc', '${OCAMLYACC} -b ${TGT[0].bld_base(env)} ${SRC}', color='BLUE', before="ocamlcmi ocaml ocamlcc")
def link_may_start(self):
	"""runnable_status replacement for ocaml link tasks: on first call,
	topologically sort the inputs according to the compile task order."""
	if not getattr(self, 'order', ''):

		# now reorder the inputs given the task dependencies
		if getattr(self, 'bytecode', 0): alltasks = self.obj.bytecode_tasks
		else: alltasks = self.obj.native_tasks

		# this part is difficult, we do not have a total order on the tasks
		# if the dependencies are wrong, this may not stop
		seen = []
		pendant = []+alltasks
		while pendant:
			task = pendant.pop(0)
			if task in seen: continue
			for x in task.run_after:
				if not x in seen:
					# a prerequisite is not placed yet: retry this task later
					pendant.append(task)
					break
			else:
				seen.append(task)
		self.inputs = [x.outputs[0] for x in seen]
		self.order = 1
	return Task.Task.runnable_status(self)
# the link rules; runnable_status is replaced to sort the inputs first
act = b('ocalink', '${OCAMLC} -o ${TGT} ${INCLUDES} ${OCALINKFLAGS} ${SRC}', color='YELLOW', after="ocaml ocamlcc")
act.runnable_status = link_may_start
act = b('ocalinkx', '${OCAMLOPT} -o ${TGT} ${INCLUDES} ${OCALINKFLAGS_OPT} ${SRC}', color='YELLOW', after="ocamlx ocamlcc")
act.runnable_status = link_may_start
def detect(conf):
	"""Configuration: find the ocaml compilers and related tools, then fill
	the OCAML* and LIB/LIBPATH/CPPPATH variables.

	'ocamlc -where' (the standard library directory) is queried once and
	reused - the original code spawned the process three times.
	"""
	opt = conf.find_program('ocamlopt', var='OCAMLOPT')
	occ = conf.find_program('ocamlc', var='OCAMLC')
	if (not opt) or (not occ):
		conf.fatal('The objective caml compiler was not found:\ninstall it or make it available in your PATH')

	v = conf.env
	v['OCAMLC'] = occ
	v['OCAMLOPT'] = opt
	v['OCAMLLEX'] = conf.find_program('ocamllex', var='OCAMLLEX')
	v['OCAMLYACC'] = conf.find_program('ocamlyacc', var='OCAMLYACC')
	v['OCAMLFLAGS'] = ''
	# standard library directory, used both for linking and for includes
	where = Utils.cmd_output(conf.env['OCAMLC'] + ' -where').strip() + os.sep
	v['OCAMLLIB'] = where
	v['LIBPATH_OCAML'] = where
	v['CPPPATH_OCAML'] = where
	v['LIB_OCAML'] = 'camlrun'
ntdb-1.0/buildtools/wafadmin/Tools/osx.py 0000664 0000000 0000000 00000013273 12241515307 0020540 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy 2008
"""MacOSX related tools
To compile an executable into a Mac application bundle (a .app), set its 'mac_app' attribute
obj.mac_app = True
To make a bundled shared library (a .bundle), set the 'mac_bundle' attribute:
obj.mac_bundle = True
"""
import os, shutil, sys, platform
import TaskGen, Task, Build, Options, Utils
from TaskGen import taskgen, feature, after, before
from Logs import error, debug
# plist template for the generated Info.plist; %s receives the executable name.
# NOTE: the XML markup had been stripped from this template (only the element
# text remained), which produced an invalid property list - restored here.
app_info = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd">
<plist version="0.9">
<dict>
	<key>CFBundlePackageType</key>
	<string>APPL</string>
	<key>CFBundleGetInfoString</key>
	<string>Created by Waf</string>
	<key>CFBundleSignature</key>
	<string>????</string>
	<key>NOTE</key>
	<string>THIS IS A GENERATED FILE, DO NOT MODIFY</string>
	<key>CFBundleExecutable</key>
	<string>%s</string>
</dict>
</plist>
'''
# see WAF issue 285
# and also http://trac.macports.org/ticket/17059
@feature('cc', 'cxx')
@before('apply_lib_vars')
def set_macosx_deployment_target(self):
	"""Export MACOSX_DEPLOYMENT_TARGET: prefer the build environment value,
	else keep an existing os.environ value, else derive it from the host OS."""
	if self.env['MACOSX_DEPLOYMENT_TARGET']:
		os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env['MACOSX_DEPLOYMENT_TARGET']
	elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:
		if sys.platform == 'darwin':
			# e.g. '10.6' from a '10.6.8' mac_ver
			os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2])
@feature('cc', 'cxx')
@after('apply_lib_vars')
def apply_framework(self):
	"""Translate FRAMEWORKPATH into -F flags and FRAMEWORK into
	'-framework NAME' link flags."""
	for x in self.to_list(self.env['FRAMEWORKPATH']):
		frameworkpath_st = '-F%s'
		self.env.append_unique('CXXFLAGS', frameworkpath_st % x)
		self.env.append_unique('CCFLAGS', frameworkpath_st % x)
		self.env.append_unique('LINKFLAGS', frameworkpath_st % x)

	for x in self.to_list(self.env['FRAMEWORK']):
		self.env.append_value('LINKFLAGS', ['-framework', x])
@taskgen
def create_bundle_dirs(self, name, out):
	"""Create (if missing) the <name>/Contents/MacOS directory nodes next to
	the output node 'out' and return the bundle root node."""
	bld = self.bld
	dir = out.parent.get_dir(name)

	if not dir:
		dir = out.__class__(name, out.parent, 1)
		bld.rescan(dir)
		contents = out.__class__('Contents', dir, 1)
		bld.rescan(contents)
		macos = out.__class__('MacOS', contents, 1)
		bld.rescan(macos)
	return dir
def bundle_name_for_output(out):
	"""Return the .app bundle name for an output node: the final extension
	of out.name is replaced by '.app' (or '.app' is appended if there is none)."""
	stem, dot, _ext = out.name.rpartition('.')
	if dot:
		return stem + '.app'
	return out.name + '.app'
@taskgen
@after('apply_link')
@feature('cprogram')
def create_task_macapp(self):
	"""Use env['MACAPP'] to force *all* executables to be transformed into Mac applications
	or use obj.mac_app = True to build specific targets as Mac apps"""
	if self.env['MACAPP'] or getattr(self, 'mac_app', False):
		apptask = self.create_task('macapp')
		apptask.set_inputs(self.link_task.outputs)

		out = self.link_task.outputs[0]

		name = bundle_name_for_output(out)
		dir = self.create_bundle_dirs(name, out)

		n1 = dir.find_or_declare(['Contents', 'MacOS', out.name])

		apptask.set_outputs([n1])
		apptask.chmod = 0755		# executable bit for the bundled binary
		apptask.install_path = os.path.join(self.install_path, name, 'Contents', 'MacOS')
		self.apptask = apptask
@after('apply_link')
@feature('cprogram')
def create_task_macplist(self):
	"""Use env['MACAPP'] to force *all* executables to be transformed into Mac applications
	or use obj.mac_app = True to build specific targets as Mac apps"""
	if  self.env['MACAPP'] or getattr(self, 'mac_app', False):
		# check if the user specified a plist before using our template
		if not getattr(self, 'mac_plist', False):
			self.mac_plist = app_info

		plisttask = self.create_task('macplist')
		plisttask.set_inputs(self.link_task.outputs)

		out = self.link_task.outputs[0]
		# substitute the executable name into the template
		self.mac_plist = self.mac_plist % (out.name)

		name = bundle_name_for_output(out)
		dir = self.create_bundle_dirs(name, out)

		n1 = dir.find_or_declare(['Contents', 'Info.plist'])

		plisttask.set_outputs([n1])
		plisttask.mac_plist = self.mac_plist
		plisttask.install_path = os.path.join(self.install_path, name, 'Contents')
		self.plisttask = plisttask
@after('apply_link')
@feature('cshlib')
def apply_link_osx(self):
	"""Record the final installed path inside the dylib via -install_name."""
	name = self.link_task.outputs[0].name
	if not self.install_path:
		return
	if getattr(self, 'vnum', None):
		name = name.replace('.dylib', '.%s.dylib' % self.vnum)

	path = os.path.join(Utils.subst_vars(self.install_path, self.env), name)
	if '-dynamiclib' in self.env['LINKFLAGS']:
		self.env.append_value('LINKFLAGS', '-install_name')
		self.env.append_value('LINKFLAGS', path)
@before('apply_link', 'apply_lib_vars')
@feature('cc', 'cxx')
def apply_bundle(self):
	"""use env['MACBUNDLE'] to force all shlibs into mac bundles
	or use obj.mac_bundle = True for specific targets only"""
	if not ('cshlib' in self.features or 'shlib' in self.features): return
	if self.env['MACBUNDLE'] or getattr(self, 'mac_bundle', False):
		# build a .bundle instead of a .dylib
		self.env['shlib_PATTERN'] = self.env['macbundle_PATTERN']
		uselib = self.uselib = self.to_list(self.uselib)
		if not 'MACBUNDLE' in uselib: uselib.append('MACBUNDLE')
@after('apply_link')
@feature('cshlib')
def apply_bundle_remove_dynamiclib(self):
	"""Bundles must not be linked with -dynamiclib/-single_module; strip
	those flags when building a mac bundle."""
	if self.env['MACBUNDLE'] or getattr(self, 'mac_bundle', False):
		if not getattr(self, 'vnum', None):
			try:
				self.env['LINKFLAGS'].remove('-dynamiclib')
				self.env['LINKFLAGS'].remove('-single_module')
			except ValueError:
				pass
# TODO REMOVE IN 1.6 (global variable)
app_dirs = ['Contents', 'Contents/MacOS', 'Contents/Resources']

def app_build(task):
	"""Task body: copy the linked executable into the .app bundle."""
	env = task.env
	shutil.copy2(task.inputs[0].srcpath(env), task.outputs[0].abspath(env))

	return 0
def plist_build(task):
	"""Task body: write the prepared Info.plist contents to the output file."""
	env = task.env
	f = open(task.outputs[0].abspath(env), "w")
	f.write(task.mac_plist)
	f.close()

	return 0
# register the bundle tasks; they run after the link tasks
Task.task_type_from_func('macapp', vars=[], func=app_build, after="cxx_link cc_link static_link")
Task.task_type_from_func('macplist', vars=[], func=plist_build, after="cxx_link cc_link static_link")
ntdb-1.0/buildtools/wafadmin/Tools/perl.py 0000664 0000000 0000000 00000006753 12241515307 0020676 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# andersg at 0x63.nu 2007
import os
import Task, Options, Utils
from Configure import conf
from TaskGen import extension, taskgen, feature, before
xsubpp_str = '${PERL} ${XSUBPP} -noprototypes -typemap ${EXTUTILS_TYPEMAP} ${SRC} > ${TGT}'
EXT_XS = ['.xs']

@before('apply_incpaths', 'apply_type_vars', 'apply_lib_vars')
@feature('perlext')
def init_perlext(self):
	"""Prepare a perl extension target: add the PERL/PERLEXT uselibs and use
	the perl extension naming pattern for the shared object."""
	self.uselib = self.to_list(getattr(self, 'uselib', ''))
	if not 'PERL' in self.uselib: self.uselib.append('PERL')
	if not 'PERLEXT' in self.uselib: self.uselib.append('PERLEXT')
	self.env['shlib_PATTERN'] = self.env['perlext_PATTERN']
@extension(EXT_XS)
def xsubpp_file(self, node):
	"""Run xsubpp on .xs files, producing a .c source compiled afterwards."""
	outnode = node.change_ext('.c')
	self.create_task('xsubpp', node, outnode)
	self.allnodes.append(outnode)

Task.simple_task_type('xsubpp', xsubpp_str, color='BLUE', before='cc cxx', shell=False)
@conf
def check_perl_version(conf, minver=None):
	"""
	Checks if perl is installed.

	If installed the variable PERL will be set in environment.

	Perl binary can be overridden by --with-perl-binary config variable

	:param minver: optional minimum version as an int tuple, e.g. (5, 8)
	"""
	if getattr(Options.options, 'perlbinary', None):
		conf.env.PERL = Options.options.perlbinary
	else:
		conf.find_program('perl', var='PERL', mandatory=True)
	try:
		version = Utils.cmd_output([conf.env.PERL, '-e', 'printf "%vd",$^V'])
	except:
		conf.fatal('could not determine the perl version')
	conf.env.PERL_VERSION = version
	cver = ''
	if minver:
		try:
			ver = tuple(map(int, version.split('.')))
		except:
			conf.fatal('unsupported perl version %r' % version)
		if ver < minver:
			conf.fatal('perl is too old')

		cver = '.'.join(map(str,minver))
	conf.check_message('perl', cver, True, version)
@conf
def check_perl_module(conf, module):
	"""
	Check if specified perlmodule is installed.

	Minimum version can be specified by specifying it after modulename
	like this:

	conf.check_perl_module("Some::Module 2.92")

	:return: True if 'perl -e "use <module>"' exits successfully
	"""
	cmd = [conf.env['PERL'], '-e', 'use %s' % module]
	r = Utils.pproc.call(cmd, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE) == 0
	conf.check_message("perl module %s" % module, "", r)
	return r
@conf
def check_perl_ext_devel(conf):
	"""
	Check for configuration needed to build perl extensions.

	Sets different xxx_PERLEXT variables in the environment.

	Also sets the ARCHDIR_PERL variable useful as installation path,
	which can be overridden by --with-perl-archdir option.
	"""
	if not conf.env.PERL:
		conf.fatal('perl detection is required first')

	def read_out(cmd):
		# query perl's own %Config for build settings
		return Utils.to_list(Utils.cmd_output([conf.env.PERL, '-MConfig', '-e', cmd]))

	conf.env.LINKFLAGS_PERLEXT = read_out('print $Config{lddlflags}')
	conf.env.CPPPATH_PERLEXT = read_out('print "$Config{archlib}/CORE"')
	conf.env.CCFLAGS_PERLEXT = read_out('print "$Config{ccflags} $Config{cccdlflags}"')

	conf.env.XSUBPP = read_out('print "$Config{privlib}/ExtUtils/xsubpp$Config{exe_ext}"')
	conf.env.EXTUTILS_TYPEMAP = read_out('print "$Config{privlib}/ExtUtils/typemap"')

	conf.env.perlext_PATTERN = '%s.' + read_out('print $Config{dlext}')[0]

	if getattr(Options.options, 'perlarchdir', None):
		conf.env.ARCHDIR_PERL = Options.options.perlarchdir
	else:
		conf.env.ARCHDIR_PERL = read_out('print $Config{sitearch}')[0]
def set_options(opt):
	"""Add the perl-related command-line options."""
	opt.add_option("--with-perl-binary", type="string", dest="perlbinary", help = 'Specify alternate perl binary', default=None)
	opt.add_option("--with-perl-archdir", type="string", dest="perlarchdir", help = 'Specify directory where to install arch specific files', default=None)
ntdb-1.0/buildtools/wafadmin/Tools/preproc.py 0000664 0000000 0000000 00000052564 12241515307 0021407 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2009 (ita)
"""
C/C++ preprocessor for finding dependencies
Reasons for using the Waf preprocessor by default
1. Some c/c++ extensions (Qt) require a custom preprocessor for obtaining the dependencies (.moc files)
2. Not all compilers provide .d files for obtaining the dependencies (portability)
3. A naive file scanner will not catch the constructs such as "#include foo()"
4. A naive file scanner will catch unnecessary dependencies (change an unused header -> recompile everything)
Regarding the speed concerns:
a. the preprocessing is performed only when files must be compiled
b. the macros are evaluated only for #if/#elif/#include
c. the time penalty is about 10%
d. system headers are not scanned
Now if you do not want the Waf preprocessor, the tool "gccdeps" uses the .d files produced
during the compilation to track the dependencies (useful when used with the boost libraries).
It only works with gcc though, and it cannot be used with Qt builds. A dumb
file scanner will be added in the future, so we will have most bahaviours.
"""
# TODO: more varargs, pragma once
# TODO: dumb file scanner tracking all includes
import re, sys, os, string
import Logs, Build, Utils
from Logs import debug, error
import traceback
class PreprocError(Utils.WafError):
	"""Error raised while lexing/evaluating preprocessor expressions."""
	pass
# sentinel pushed on the file stack to mean "pop the current file"
POPFILE = '-'

recursion_limit = 5000
"do not loop too much on header inclusion"

go_absolute = 0
"set to 1 to track headers on files in /usr/include - else absolute paths are ignored"

standard_includes = ['/usr/include']
if sys.platform == "win32":
	standard_includes = []

use_trigraphs = 0
'apply the trigraph rules first'

strict_quotes = 0
"Keep <> for system includes (do not search for those includes)"

# c++ alternative operator spellings mapped to their symbols
g_optrans = {
'not':'!',
'and':'&&',
'bitand':'&',
'and_eq':'&=',
'or':'||',
'bitor':'|',
'or_eq':'|=',
'xor':'^',
'xor_eq':'^=',
'compl':'~',
}
"these ops are for c++, to reset, set an empty dict"
# ignore #warning and #error
re_lines = re.compile(\
	'^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*)\r*$',
	re.IGNORECASE | re.MULTILINE)

re_mac = re.compile("^[a-zA-Z_]\w*")
re_fun = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*[(]')
re_pragma_once = re.compile('^\s*once\s*', re.IGNORECASE)
re_nl = re.compile('\\\\\r*\n', re.MULTILINE)
re_cpp = re.compile(
	r"""(/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)|//[^\n]*|("(?:\\.|[^"\\])*"|'(?:\\.|[^'\\])*'|.[^/"'\\]*)""",
	re.MULTILINE)
trig_def = [('??'+a, b) for a, b in zip("=-/!'()<>", r'#~\|^[]{}')]
chr_esc = {'0':0, 'a':7, 'b':8, 't':9, 'n':10, 'f':11, 'v':12, 'r':13, '\\':92, "'":39}

# token category codes used throughout the lexer
NUM   = 'i'
OP    = 'O'
IDENT = 'T'
STR   = 's'
CHAR  = 'c'

tok_types = [NUM, STR, IDENT, OP]
# fix: the named groups '(?P<name>...)' had been garbled into invalid
# '(?P[...]' expressions (re.error at import time); restored below.
# The group names (hex, oct, n0, char, ...) are read by the tokenizer.
exp_types = [
	r"""0[xX](?P<hex>[a-fA-F0-9]+)(?P<qual1>[uUlL]*)|L*?'(?P<char>(\\.|[^\\'])+)'|(?P<n1>\d+)[Ee](?P<exp0>[+-]*?\d+)(?P<float0>[fFlL]*)|(?P<n2>\d*\.\d+)([Ee](?P<exp1>[+-]*?\d+))?(?P<float1>[fFlL]*)|(?P<n4>\d+\.\d*)([Ee](?P<exp2>[+-]*?\d+))?(?P<float2>[fFlL]*)|(?P<oct>0*)(?P<n0>\d+)(?P<qual2>[uUlL]*)""",
	r'L?"([^"\\]|\\.)*"',
	r'[a-zA-Z_]\w*',
	r'%:%:|<<=|>>=|\.\.\.|<<|<%|<:|<=|>>|>=|\+\+|\+=|--|->|-=|\*=|/=|%:|%=|%>|==|&&|&=|\|\||\|=|\^=|:>|!=|##|[\(\)\{\}\[\]<>\?\|\^\*\+&=:!#;,%/\-\?\~\.]',
]
re_clexer = re.compile('|'.join(["(?P<%s>%s)" % (name, part) for name, part in zip(tok_types, exp_types)]), re.M)

accepted  = 'a'
ignored   = 'i'
undefined = 'u'
skipped   = 's'
def repl(m):
	"""re.sub callback for re_cpp: a comment (group 1) collapses to one
	space, code/string text (group 2) is kept, anything else vanishes."""
	if m.group(1):
		return ' '
	return m.group(2) or ''
def filter_comments(filename):
	"""Read 'filename', strip comments and line continuations, and return
	the preprocessor directives as a list of (keyword, rest-of-line) tuples."""
	# return a list of tuples : keyword, line
	code = Utils.readf(filename)
	if use_trigraphs:
		for (a, b) in trig_def: code = code.split(a).join(b)
	code = re_nl.sub('', code)
	code = re_cpp.sub(repl, code)
	return [(m.group(2), m.group(3)) for m in re.finditer(re_lines, code)]
# operator -> precedence level (lower index binds tighter)
prec = {}
# op -> number, needed for such expressions:   #if 1 && 2 != 0
ops = ['* / %', '+ -', '<< >>', '< <= >= >', '== !=', '& | ^', '&& ||', ',']
for x in range(len(ops)):
	syms = ops[x]
	for u in syms.split():
		prec[u] = x
def reduce_nums(val_1, val_2, val_op):
	"""apply arithmetic rules and try to return an integer result

	Operands that are not numeric are coerced with int(); an unknown
	operator yields 0 (preprocessor expressions are best-effort).
	"""
	#print val_1, val_2, val_op

	# now perform the operation, make certain a and b are numeric
	try: a = 0 + val_1
	except TypeError: a = int(val_1)
	try: b = 0 + val_2
	except TypeError: b = int(val_2)

	d = val_op
	if d == '%': c = a%b
	elif d=='+': c = a+b
	elif d=='-': c = a-b
	elif d=='*': c = a*b
	elif d=='/': c = a/b
	elif d=='^': c = a^b
	elif d=='|': c = a|b
	elif d=='||': c = int(a or b)
	elif d=='&': c = a&b
	elif d=='&&': c = int(a and b)
	elif d=='==': c = int(a == b)
	elif d=='!=': c = int(a != b)
	elif d=='<=': c = int(a <= b)
	elif d=='<': c = int(a < b)
	elif d=='>': c = int(a > b)
	elif d=='>=': c = int(a >= b)
	# fix: the two shift branches had been garbled into one invalid
	# statement ("c = a<>': ..."); restored here. A duplicate,
	# unreachable '^' branch was also removed.
	elif d=='<<': c = a<<b
	elif d=='>>': c = a>>b
	else: c = 0
	return c
def get_num(lst):
	"""Consume one primary expression (number, parenthesized term, unary op,
	or identifier) from the token list and return (value, remaining tokens).

	Unresolved identifiers evaluate to 0, as required for #if expressions.
	"""
	if not lst: raise PreprocError("empty list for get_num")
	(p, v) = lst[0]
	if p == OP:
		if v == '(':
			count_par = 1
			i = 1
			while i < len(lst):
				(p, v) = lst[i]

				if p == OP:
					if v == ')':
						count_par -= 1
						if count_par == 0:
							break
					elif v == '(':
						count_par += 1
				i += 1
			else:
				raise PreprocError("rparen expected %r" % lst)

			(num, _) = get_term(lst[1:i])
			return (num, lst[i+1:])

		elif v == '+':
			return get_num(lst[1:])
		elif v == '-':
			num, lst = get_num(lst[1:])
			return (reduce_nums('-1', num, '*'), lst)
		elif v == '!':
			num, lst = get_num(lst[1:])
			return (int(not int(num)), lst)
		elif v == '~':
			# bug fix: the operand must be parsed first - 'num' was
			# previously unbound here, raising NameError on any '~' token
			num, lst = get_num(lst[1:])
			return (~ int(num), lst)
		else:
			raise PreprocError("invalid op token %r for get_num" % lst)
	elif p == NUM:
		return v, lst[1:]
	elif p == IDENT:
		# all macros should have been replaced, remaining identifiers eval to 0
		return 0, lst[1:]
	else:
		raise PreprocError("invalid token %r for get_num" % lst)
def get_term(lst):
	"""Evaluate a token list as an expression (with short-circuit &&/||,
	the ',' and '?:' operators, and operator precedence) and return
	(value, remaining tokens)."""
	if not lst: raise PreprocError("empty list for get_term")
	num, lst = get_num(lst)
	if not lst:
		return (num, [])
	(p, v) = lst[0]
	if p == OP:
		if v == '&&' and not num:
			return (num, [])
		elif v == '||' and num:
			return (num, [])
		elif v == ',':
			# skip
			return get_term(lst[1:])
		elif v == '?':
			# find the matching ':' of the conditional operator
			count_par = 0
			i = 1
			while i < len(lst):
				(p, v) = lst[i]

				if p == OP:
					if v == ')':
						count_par -= 1
					elif v == '(':
						count_par += 1
					elif v == ':':
						if count_par == 0:
							break
				i += 1
			else:
				raise PreprocError("rparen expected %r" % lst)

			if int(num):
				return get_term(lst[1:i])
			else:
				return get_term(lst[i+1:])

		else:
			num2, lst = get_num(lst[1:])

			if not lst:
				# no more tokens to process
				num2 = reduce_nums(num, num2, v)
				return get_term([(NUM, num2)] + lst)

			# operator precedence
			p2, v2 = lst[0]
			if p2 != OP:
				raise PreprocError("op expected %r" % lst)

			if prec[v2] >= prec[v]:
				num2 = reduce_nums(num, num2, v)
				return get_term([(NUM, num2)] + lst)
			else:
				num3, lst = get_num(lst[1:])
				num3 = reduce_nums(num2, num3, v2)
				return get_term([(NUM, num), (p, v), (NUM, num3)] + lst)

	raise PreprocError("cannot reduce %r" % lst)
def reduce_eval(lst):
	"""take a list of tokens and output true or false (#if/#elif conditions)"""
	# any tokens left over after the first full term are discarded
	num, lst = get_term(lst)
	return (NUM, num)
def stringize(lst):
	"""Concatenate the values of a (type, value) token list into one string
	(used by the '#' stringizing operator)."""
	return ''.join(str(value) for _type, value in lst)
def paste_tokens(t1, t2):
	"""
	Token pasting for the '##' operator; here is what we can paste:
	 a ## b  ->  ab
	 > ## =  ->  >=
	 a ## 2  ->  a2
	Raises PreprocError when the pair does not form a valid token.
	"""
	p1 = None
	if t1[0] == OP and t2[0] == OP:
		p1 = OP
	elif t1[0] == IDENT and (t2[0] == IDENT or t2[0] == NUM):
		p1 = IDENT
	elif t1[0] == NUM and t2[0] == NUM:
		p1 = NUM
	if not p1:
		raise PreprocError('tokens do not make a valid paste %r and %r' % (t1, t2))
	return (p1, t1[1] + t2[1])
def reduce_tokens(lst, defs, ban=[]):
	"""replace the tokens in lst, using the macros provided in defs, and a list of macros that cannot be re-applied"""
	# NOTE(review): 'lst' is modified in place; 'ban' prevents infinite
	# recursion when a macro expansion references itself
	i = 0
	while i < len(lst):
		(p, v) = lst[i]
		if p == IDENT and v == "defined":
			# handle 'defined X' and 'defined(X)': replace by NUM 1/0
			del lst[i]
			if i < len(lst):
				(p2, v2) = lst[i]
				if p2 == IDENT:
					if v2 in defs:
						lst[i] = (NUM, 1)
					else:
						lst[i] = (NUM, 0)
				elif p2 == OP and v2 == '(':
					del lst[i]
					(p2, v2) = lst[i]
					del lst[i] # remove the ident, and change the ) for the value
					if v2 in defs:
						lst[i] = (NUM, 1)
					else:
						lst[i] = (NUM, 0)
				else:
					raise PreprocError("invalid define expression %r" % lst)
		elif p == IDENT and v in defs:
			# lazily parse string definitions into (params, tokens) form
			if isinstance(defs[v], str):
				a, b = extract_macro(defs[v])
				defs[v] = b
			macro_def = defs[v]
			to_add = macro_def[1]
			if isinstance(macro_def[0], list):
				# macro without arguments
				del lst[i]
				for x in xrange(len(to_add)):
					lst.insert(i, to_add[x])
					i += 1
			else:
				# collect the arguments for the funcall
				args = []
				del lst[i]
				if i >= len(lst):
					raise PreprocError("expected '(' after %r (got nothing)" % v)
				(p2, v2) = lst[i]
				if p2 != OP or v2 != '(':
					raise PreprocError("expected '(' after %r" % v)
				del lst[i]
				one_param = []
				count_paren = 0
				while i < len(lst):
					p2, v2 = lst[i]
					del lst[i]
					if p2 == OP and count_paren == 0:
						if v2 == '(':
							one_param.append((p2, v2))
							count_paren += 1
						elif v2 == ')':
							# end of the argument list
							if one_param: args.append(one_param)
							break
						elif v2 == ',':
							if not one_param: raise PreprocError("empty param in funcall %s" % p)
							args.append(one_param)
							one_param = []
						else:
							one_param.append((p2, v2))
					else:
						# inside nested parentheses, keep everything
						one_param.append((p2, v2))
						if v2 == '(': count_paren += 1
						elif v2 == ')': count_paren -= 1
				else:
					# while/else: token list exhausted before the closing ')'
					raise PreprocError('malformed macro')
				# substitute the arguments within the define expression
				accu = []
				arg_table = macro_def[0]
				j = 0
				while j < len(to_add):
					(p2, v2) = to_add[j]
					if p2 == OP and v2 == '#':
						# stringize is for arguments only
						if j+1 < len(to_add) and to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table:
							toks = args[arg_table[to_add[j+1][1]]]
							accu.append((STR, stringize(toks)))
							j += 1
						else:
							accu.append((p2, v2))
					elif p2 == OP and v2 == '##':
						# token pasting, how can man invent such a complicated system?
						if accu and j+1 < len(to_add):
							# we have at least two tokens
							t1 = accu[-1]
							if to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table:
								# paste with a macro argument
								toks = args[arg_table[to_add[j+1][1]]]
								if toks:
									accu[-1] = paste_tokens(t1, toks[0]) #(IDENT, accu[-1][1] + toks[0][1])
									accu.extend(toks[1:])
								else:
									# error, case "a##"
									accu.append((p2, v2))
									accu.extend(toks)
							elif to_add[j+1][0] == IDENT and to_add[j+1][1] == '__VA_ARGS__':
								# TODO not sure
								# first collect the tokens
								va_toks = []
								st = len(macro_def[0])
								pt = len(args)
								for x in args[pt-st+1:]:
									va_toks.extend(x)
									va_toks.append((OP, ','))
								if va_toks: va_toks.pop() # extra comma
								if len(accu)>1:
									(p3, v3) = accu[-1]
									(p4, v4) = accu[-2]
									if v3 == '##':
										# remove the token paste
										accu.pop()
										if v4 == ',' and pt < st:
											# remove the comma
											accu.pop()
								accu += va_toks
							else:
								accu[-1] = paste_tokens(t1, to_add[j+1])
							j += 1
						else:
							# invalid paste, case "##a" or "b##"
							accu.append((p2, v2))
					elif p2 == IDENT and v2 in arg_table:
						# plain argument substitution, expanded recursively
						toks = args[arg_table[v2]]
						reduce_tokens(toks, defs, ban+[v])
						accu.extend(toks)
					else:
						accu.append((p2, v2))
					j += 1
				# rescan the fully substituted body, then splice it back in
				reduce_tokens(accu, defs, ban+[v])
				for x in xrange(len(accu)-1, -1, -1):
					lst.insert(i, accu[x])
		i += 1
def eval_macro(lst, adefs):
	"""reduce the tokens from the list lst, and try to return a 0/1 result"""
	reduce_tokens(lst, adefs, [])
	if not lst:
		raise PreprocError("missing tokens to evaluate")
	token_type, value = reduce_eval(lst)
	return int(value) != 0
def extract_macro(txt):
	"""process a macro definition from "#define f(x, y) x * y" into a function or a simple macro without arguments"""
	# Returns (name, [params, body_tokens]) where params is a dict mapping
	# parameter name -> position for function-like macros, or [] otherwise.
	t = tokenize(txt)
	if re_fun.search(txt):
		# function-like macro: parse the parameter list with a small
		# state machine on 'prev' (the previously seen token)
		p, name = t[0]
		p, v = t[1]
		if p != OP: raise PreprocError("expected open parenthesis")
		i = 1
		pindex = 0
		params = {}
		prev = '('
		while 1:
			i += 1
			p, v = t[i]
			if prev == '(':
				# expecting the first parameter or an empty list
				if p == IDENT:
					params[v] = pindex
					pindex += 1
					prev = p
				elif p == OP and v == ')':
					break
				else:
					raise PreprocError("unexpected token (3)")
			elif prev == IDENT:
				# after a parameter: expecting ',' or ')'
				if p == OP and v == ',':
					prev = v
				elif p == OP and v == ')':
					break
				else:
					raise PreprocError("comma or ... expected")
			elif prev == ',':
				# after a comma: expecting the next parameter
				if p == IDENT:
					params[v] = pindex
					pindex += 1
					prev = p
				elif p == OP and v == '...':
					# variadic macros are not supported here
					raise PreprocError("not implemented (1)")
				else:
					raise PreprocError("comma or ... expected (2)")
			elif prev == '...':
				raise PreprocError("not implemented (2)")
			else:
				raise PreprocError("unexpected else")
		#~ print (name, [params, t[i+1:]])
		return (name, [params, t[i+1:]])
	else:
		# object-like macro: everything after the name is the body
		(p, v) = t[0]
		return (v, [[], t[1:]])
# Match the target of an '#include' directive: either <name> (captured in the
# named group 'a') or "name" (captured in the named group 'b').  The named
# groups were lost in this copy of the file ('(?P.*)' is not a valid named
# group); they are required by extract_include() which reads m.group('a') and
# m.group('b').
re_include = re.compile('^\s*(<(?P<a>.*)>|"(?P<b>.*)")')
def extract_include(txt, defs):
	"""process a line in the form "#include foo" to return a string representing the file"""
	# Returns a (kind, path) pair where kind is '<' for system includes and
	# '"' for local includes; falls back to macro expansion when the
	# argument is not a literal include string.
	m = re_include.search(txt)
	if m:
		if m.group('a'): return '<', m.group('a')
		if m.group('b'): return '"', m.group('b')
	# perform preprocessing and look at the result, it must match an include
	toks = tokenize(txt)
	reduce_tokens(toks, defs, ['waf_include'])
	if not toks:
		raise PreprocError("could not parse include %s" % txt)
	if len(toks) == 1:
		if toks[0][0] == STR:
			return '"', toks[0][1]
	else:
		if toks[0][1] == '<' and toks[-1][1] == '>':
			# bug fix: every caller unpacks a (kind, path) pair, so return
			# a tuple here as well instead of a bare string
			return '<', stringize(toks).lstrip('<').rstrip('>')
	raise PreprocError("could not parse include %s." % txt)
def parse_char(txt):
	# Evaluate the body of a c character literal (quotes already stripped)
	# to its integer value.
	if not txt: raise PreprocError("attempted to parse a null char")
	if txt[0] != '\\':
		# plain, unescaped character
		return ord(txt)
	c = txt[1]
	if c == 'x':
		# hexadecimal escape; both branches compute the same value, the
		# length check is redundant but kept as-is
		if len(txt) == 4 and txt[3] in string.hexdigits: return int(txt[2:], 16)
		return int(txt[2:], 16)
	elif c.isdigit():
		# octal escape such as \0, \012
		if c == '0' and len(txt)==2: return 0
		for i in 3, 2, 1:
			if len(txt) > i and txt[1:1+i].isdigit():
				# NOTE(review): this branch returns a (length, value) tuple
				# while the others return a bare int -- confirm callers cope
				return (1+i, int(txt[1:1+i], 8))
	else:
		# named escape such as \n or \t, looked up in the chr_esc table
		try: return chr_esc[c]
		except KeyError: raise PreprocError("could not parse char literal '%s'" % txt)
@Utils.run_once
def tokenize_private(s):
	# Split a preprocessor expression into (token_type, value) pairs using
	# the re_clexer master regexp; results are memoized per input string by
	# the run_once decorator (callers must not mutate the returned list).
	ret = []
	for match in re_clexer.finditer(s):
		m = match.group
		for name in tok_types:
			v = m(name)
			if v:
				if name == IDENT:
					# alternative operator spellings (and/or/not...) map to OP
					try: v = g_optrans[v]; name = OP
					except KeyError:
						# c++ specific
						if v.lower() == "true":
							v = 1
							name = NUM
						elif v.lower() == "false":
							v = 0
							name = NUM
				elif name == NUM:
					# convert the various numeric literal forms to values
					if m('oct'): v = int(v, 8)
					elif m('hex'): v = int(m('hex'), 16)
					elif m('n0'): v = m('n0')
					else:
						v = m('char')
						if v: v = parse_char(v)
						else: v = m('n2') or m('n4')
				elif name == OP:
					# digraph spellings of '#' and '##'
					if v == '%:': v = '#'
					elif v == '%:%:': v = '##'
				elif name == STR:
					# remove the quotes around the string
					v = v[1:-1]
				ret.append((name, v))
				break
	return ret
def tokenize(s):
	"""convert a string into a list of tokens (shlex.split does not apply to c/c++/d)"""
	# hand out a copy so callers may mutate the list without corrupting
	# the memoized result held by tokenize_private
	return list(tokenize_private(s))
@Utils.run_once
def define_name(line):
	"""extract the macro name from the body of a '#define' line (memoized)"""
	match = re_mac.match(line)
	return match.group(0)
class c_parser(object):
	"""A c/c++ preprocessor-like scanner used to extract the #include
	dependencies of a source file.  Resolved files are accumulated in
	self.nodes, unresolved include names in self.names."""
	def __init__(self, nodepaths=None, defines=None):
		#self.lines = txt.split('\n')
		self.lines = []  # stack of (token, line) pairs still to process
		if defines is None:
			self.defs = {}
		else:
			self.defs = dict(defines) # make a copy
		self.state = []  # one entry per nested #if block
		self.env = None # needed for the variant when searching for files
		self.count_files = 0  # guards against runaway recursive inclusion
		self.currentnode_stack = []
		self.nodepaths = nodepaths or []
		self.nodes = []  # dependency nodes found
		self.names = []  # include names that could not be resolved
		# file added
		self.curfile = ''
		self.ban_includes = set([])  # excluded via #pragma once / #import
	def cached_find_resource(self, node, filename):
		# find_resource with a per-build-context cache keyed on (node, name)
		try:
			nd = node.bld.cache_nd
		except:
			nd = node.bld.cache_nd = {}
		tup = (node.id, filename)
		try:
			return nd[tup]
		except KeyError:
			ret = node.find_resource(filename)
			nd[tup] = ret
			return ret
	def tryfind(self, filename):
		# resolve an include against the current directory then the include
		# paths; queue the found file's lines for scanning
		self.curfile = filename
		# for msvc it should be a for loop on the whole stack
		found = self.cached_find_resource(self.currentnode_stack[-1], filename)
		for n in self.nodepaths:
			if found:
				break
			found = self.cached_find_resource(n, filename)
		if found:
			self.nodes.append(found)
			if filename[-4:] != '.moc':
				self.addlines(found)
		else:
			if not filename in self.names:
				self.names.append(filename)
		return found
	def addlines(self, node):
		# push the comment-filtered lines of a file onto the processing stack
		self.currentnode_stack.append(node.parent)
		filepath = node.abspath(self.env)
		self.count_files += 1
		if self.count_files > recursion_limit: raise PreprocError("recursion limit exceeded")
		pc = self.parse_cache
		debug('preproc: reading file %r', filepath)
		try:
			lns = pc[filepath]
		except KeyError:
			pass
		else:
			self.lines.extend(lns)
			return
		try:
			lines = filter_comments(filepath)
			lines.append((POPFILE, ''))
			# reversed so that .pop() yields the lines in file order
			lines.reverse()
			pc[filepath] = lines # cache the lines filtered
			self.lines.extend(lines)
		except IOError:
			raise PreprocError("could not read the file %s" % filepath)
		except Exception:
			if Logs.verbose > 0:
				error("parsing %s failed" % filepath)
				traceback.print_exc()
	def start(self, node, env):
		# main entry point: scan 'node' and everything it includes
		debug('preproc: scanning %s (in %s)', node.name, node.parent.name)
		self.env = env
		variant = node.variant(env)
		bld = node.__class__.bld
		try:
			self.parse_cache = bld.parse_cache
		except AttributeError:
			bld.parse_cache = {}
			self.parse_cache = bld.parse_cache
		self.addlines(node)
		if env['DEFLINES']:
			# command-line defines are processed before any file content
			lst = [('define', x) for x in env['DEFLINES']]
			lst.reverse()
			self.lines.extend(lst)
		while self.lines:
			(kind, line) = self.lines.pop()
			if kind == POPFILE:
				# marker emitted at the end of each included file
				self.currentnode_stack.pop()
				continue
			try:
				self.process_line(kind, line)
			except Exception, e:
				if Logs.verbose:
					debug('preproc: line parsing failed (%s): %s %s', e, line, Utils.ex_stack())
	def process_line(self, token, line):
		"""
		WARNING: a new state must be added for if* because the endif
		"""
		ve = Logs.verbose
		if ve: debug('preproc: line is %s - %s state is %s', token, line, self.state)
		state = self.state
		# make certain we define the state if we are about to enter in an if block
		if token in ['ifdef', 'ifndef', 'if']:
			state.append(undefined)
		elif token == 'endif':
			state.pop()
		# skip lines when in a dead 'if' branch, wait for the endif
		if not token in ['else', 'elif', 'endif']:
			if skipped in self.state or ignored in self.state:
				return
		if token == 'if':
			ret = eval_macro(tokenize(line), self.defs)
			if ret: state[-1] = accepted
			else: state[-1] = ignored
		elif token == 'ifdef':
			m = re_mac.match(line)
			if m and m.group(0) in self.defs: state[-1] = accepted
			else: state[-1] = ignored
		elif token == 'ifndef':
			m = re_mac.match(line)
			if m and m.group(0) in self.defs: state[-1] = ignored
			else: state[-1] = accepted
		elif token == 'include' or token == 'import':
			(kind, inc) = extract_include(line, self.defs)
			if inc in self.ban_includes: return
			if token == 'import': self.ban_includes.add(inc)
			if ve: debug('preproc: include found %s (%s) ', inc, kind)
			# system includes are skipped when strict_quotes is enabled
			if kind == '"' or not strict_quotes:
				self.tryfind(inc)
		elif token == 'elif':
			if state[-1] == accepted:
				state[-1] = skipped
			elif state[-1] == ignored:
				if eval_macro(tokenize(line), self.defs):
					state[-1] = accepted
		elif token == 'else':
			if state[-1] == accepted: state[-1] = skipped
			elif state[-1] == ignored: state[-1] = accepted
		elif token == 'define':
			try:
				# store the raw line; extract_macro parses it lazily later
				self.defs[define_name(line)] = line
			except:
				raise PreprocError("invalid define line %s" % line)
		elif token == 'undef':
			m = re_mac.match(line)
			if m and m.group(0) in self.defs:
				self.defs.__delitem__(m.group(0))
				#print "undef %s" % name
		elif token == 'pragma':
			if re_pragma_once.match(line.lower()):
				self.ban_includes.add(self.curfile)
def get_deps(node, env, nodepaths=[]):
	"""
	Get the dependencies using a c/c++ preprocessor, this is required for finding dependencies of the kind
	#include some_macro()
	"""
	parser = c_parser(nodepaths)
	parser.start(node, env)
	return (parser.nodes, parser.names)
#################### dumb dependency scanner

# match an '#include' directive (or the '%:' digraph form), capturing the
# keyword in group 2 and the rest of the line in group 3
re_inc = re.compile(\
	'^[ \t]*(#|%:)[ \t]*(include)[ \t]*(.*)\r*$',
	re.IGNORECASE | re.MULTILINE)
def lines_includes(filename):
	"""Read a source file and return the (keyword, rest-of-line) pairs of
	every '#include' directive it contains."""
	code = Utils.readf(filename)
	if use_trigraphs:
		# bug fix: the original 'code.split(a).join(b)' raised
		# AttributeError (lists have no join method); substitute each
		# trigraph 'a' by its replacement 'b'
		for (a, b) in trig_def: code = b.join(code.split(a))
	# join the lines continued with backslash-newline, then strip comments
	code = re_nl.sub('', code)
	code = re_cpp.sub(repl, code)
	return [(m.group(2), m.group(3)) for m in re.finditer(re_inc, code)]
def get_deps_simple(node, env, nodepaths=[], defines={}):
	"""
	Get the dependencies by just looking recursively at the #include statements
	(no macro evaluation).  Returns (nodes, names): the resolved dependency
	nodes and the include names that could not be resolved.
	"""
	nodes = []
	names = []
	def find_deps(node):
		# scan one file and recurse into each include resolved to a node
		lst = lines_includes(node.abspath(env))
		for (_, line) in lst:
			(t, filename) = extract_include(line, defines)
			if filename in names:
				continue
			if filename.endswith('.moc'):
				# generated moc files cannot be resolved yet
				names.append(filename)
			found = None
			for n in nodepaths:
				if found:
					break
				found = n.find_resource(filename)
			if not found:
				if not filename in names:
					names.append(filename)
			elif not found in nodes:
				nodes.append(found)
				# bug fix: recurse into the newly found header instead of
				# re-scanning the current file ('find_deps(node)' was a
				# no-op, so headers' own includes were never collected)
				find_deps(found)
	find_deps(node)
	return (nodes, names)
ntdb-1.0/buildtools/wafadmin/Tools/python.py 0000664 0000000 0000000 00000033277 12241515307 0021256 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2007 (ita)
# Gustavo Carneiro (gjc), 2007
"Python support"
import os, sys
import TaskGen, Utils, Options
from Logs import debug, warn, info
from TaskGen import extension, before, after, feature
from Configure import conf
from config_c import parse_flags
EXT_PY = ['.py']
FRAG_2 = '''
#include "Python.h"
#ifdef __cplusplus
extern "C" {
#endif
void Py_Initialize(void);
void Py_Finalize(void);
#ifdef __cplusplus
}
#endif
int main()
{
Py_Initialize();
Py_Finalize();
return 0;
}
'''
@feature('pyext')
@before('apply_incpaths', 'apply_lib_vars', 'apply_type_vars', 'apply_bundle')
@after('vars_target_cshlib')
def init_pyext(self):
	# Task generators with the 'pyext' feature build python extension
	# modules: install into the architecture-dependent python directory,
	# pull in the PYEXT uselib variables, and build as a mac bundle on osx.
	self.default_install_path = '${PYTHONARCHDIR}'
	self.uselib = self.to_list(getattr(self, 'uselib', ''))
	if not 'PYEXT' in self.uselib:
		self.uselib.append('PYEXT')
	self.env['MACBUNDLE'] = True
@before('apply_link', 'apply_lib_vars', 'apply_type_vars')
@after('apply_bundle')
@feature('pyext')
def pyext_shlib_ext(self):
	"""Make python extensions use the interpreter's shared-object suffix."""
	# override shlib_PATTERN set by the osx module
	env = self.env
	env['shlib_PATTERN'] = env['pyext_PATTERN']
@before('apply_incpaths', 'apply_lib_vars', 'apply_type_vars')
@feature('pyembed')
def init_pyembed(self):
	"""Add the PYEMBED uselib variable to task generators embedding python."""
	uselib = self.to_list(getattr(self, 'uselib', ''))
	self.uselib = uselib
	if not 'PYEMBED' in uselib:
		uselib.append('PYEMBED')
@extension(EXT_PY)
def process_py(self, node):
	# Hook for .py source files: when installing, schedule the file's
	# installation (and byte-compilation) to run after the build.
	if not (self.bld.is_install and self.install_path):
		return
	def inst_py(ctx):
		# deferred so that it runs in the post-build phase
		install_pyfile(self, node)
	self.bld.add_post_fun(inst_py)
def install_pyfile(self, node):
	# Install one python file; on uninstall remove the .pyc/.pyo companions,
	# on install byte-compile with the configured interpreter.
	path = self.bld.get_install_path(self.install_path + os.sep + node.name, self.env)
	self.bld.install_files(self.install_path, [node], self.env, self.chmod, postpone=False)
	if self.bld.is_install < 0:
		# negative means uninstall: drop the compiled companions too
		info("* removing byte compiled python files")
		for x in 'co':
			try:
				os.remove(path + x)
			except OSError:
				pass
	if self.bld.is_install > 0:
		if self.env['PYC'] or self.env['PYO']:
			info("* byte compiling %r" % path)
		if self.env['PYC']:
			# run the target interpreter so the .pyc matches its version
			program = ("""
import sys, py_compile
for pyfile in sys.argv[1:]:
	py_compile.compile(pyfile, pyfile + 'c')
""")
			argv = [self.env['PYTHON'], '-c', program, path]
			ret = Utils.pproc.Popen(argv).wait()
			if ret:
				raise Utils.WafError('bytecode compilation failed %r' % path)
		if self.env['PYO']:
			# same, with the optimize flag, producing a .pyo
			program = ("""
import sys, py_compile
for pyfile in sys.argv[1:]:
	py_compile.compile(pyfile, pyfile + 'o')
""")
			argv = [self.env['PYTHON'], self.env['PYFLAGS_OPT'], '-c', program, path]
			ret = Utils.pproc.Popen(argv).wait()
			if ret:
				raise Utils.WafError('bytecode compilation failed %r' % path)
# COMPAT: kept so that old wscripts instantiating 'py_taskgen' directly
# keep working
class py_taskgen(TaskGen.task_gen):
	def __init__(self, *k, **kw):
		TaskGen.task_gen.__init__(self, *k, **kw)
@before('apply_core')
@after('vars_target_cprogram', 'vars_target_cshlib')
@feature('py')
def init_py(self):
	"""Install plain python files into the site-packages directory by default."""
	self.default_install_path = '${PYTHONDIR}'
def _get_python_variables(python_exe, variables, imports=['import sys']):
	"""Run a python interpreter and print some variables"""
	# Builds a tiny program printing repr() of each requested expression,
	# runs it under python_exe, and parses the output back into values.
	# NOTE: the mutable default argument is shared but never modified here.
	program = list(imports)
	program.append('')
	for v in variables:
		program.append("print(repr(%s))" % v)
	os_env = dict(os.environ)
	try:
		del os_env['MACOSX_DEPLOYMENT_TARGET'] # see comments in the OSX tool
	except KeyError:
		pass
	proc = Utils.pproc.Popen([python_exe, "-c", '\n'.join(program)], stdout=Utils.pproc.PIPE, env=os_env)
	output = proc.communicate()[0].split("\n") # do not touch, python3
	if proc.returncode:
		if Options.options.verbose:
			warn("Python program to extract python configuration variables failed:\n%s"
				% '\n'.join(["line %03i: %s" % (lineno+1, line) for lineno, line in enumerate(program)]))
		raise RuntimeError
	return_values = []
	for s in output:
		s = s.strip()
		if not s:
			continue
		if s == 'None':
			return_values.append(None)
		elif (s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"'):
			# quoted repr() of a string: eval to recover the raw value
			return_values.append(eval(s))
		elif s[0].isdigit():
			return_values.append(int(s))
		else: break
	return return_values
@conf
def check_python_headers(conf, mandatory=True):
	"""Check for headers and libraries necessary to extend or embed python.
	On success the environment variables xxx_PYEXT and xxx_PYEMBED are added for uselib
	PYEXT: for compiling python extensions
	PYEMBED: for embedding a python interpreter"""
	if not conf.env['CC_NAME'] and not conf.env['CXX_NAME']:
		conf.fatal('load a compiler first (gcc, g++, ..)')
	if not conf.env['PYTHON_VERSION']:
		conf.check_python_version()
	env = conf.env
	python = env['PYTHON']
	if not python:
		conf.fatal('could not find the python executable')
	## On Mac OSX we need to use mac bundles for python plugins
	if Options.platform == 'darwin':
		conf.check_tool('osx')
	try:
		# Get some python configuration variables using distutils
		v = 'prefix SO SYSLIBS LDFLAGS SHLIBS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET'.split()
		(python_prefix, python_SO, python_SYSLIBS, python_LDFLAGS, python_SHLIBS,
		 python_LIBDIR, python_LIBPL, INCLUDEPY, Py_ENABLE_SHARED,
		 python_MACOSX_DEPLOYMENT_TARGET) = \
			_get_python_variables(python, ["get_config_var('%s') or ''" % x for x in v],
				['from distutils.sysconfig import get_config_var'])
	except RuntimeError:
		conf.fatal("Python development headers not found (-v for details).")
	conf.log.write("""Configuration returned from %r:
python_prefix = %r
python_SO = %r
python_SYSLIBS = %r
python_LDFLAGS = %r
python_SHLIBS = %r
python_LIBDIR = %r
python_LIBPL = %r
INCLUDEPY = %r
Py_ENABLE_SHARED = %r
MACOSX_DEPLOYMENT_TARGET = %r
""" % (python, python_prefix, python_SO, python_SYSLIBS, python_LDFLAGS, python_SHLIBS,
	python_LIBDIR, python_LIBPL, INCLUDEPY, Py_ENABLE_SHARED, python_MACOSX_DEPLOYMENT_TARGET))
	if python_MACOSX_DEPLOYMENT_TARGET:
		conf.env['MACOSX_DEPLOYMENT_TARGET'] = python_MACOSX_DEPLOYMENT_TARGET
		conf.environ['MACOSX_DEPLOYMENT_TARGET'] = python_MACOSX_DEPLOYMENT_TARGET
	# extension modules take the interpreter's shared-object suffix
	env['pyext_PATTERN'] = '%s'+python_SO
	# Check for python libraries for embedding
	if python_SYSLIBS is not None:
		for lib in python_SYSLIBS.split():
			if lib.startswith('-l'):
				lib = lib[2:] # strip '-l'
			env.append_value('LIB_PYEMBED', lib)
	if python_SHLIBS is not None:
		for lib in python_SHLIBS.split():
			if lib.startswith('-l'):
				env.append_value('LIB_PYEMBED', lib[2:]) # strip '-l'
			else:
				env.append_value('LINKFLAGS_PYEMBED', lib)
	if Options.platform != 'darwin' and python_LDFLAGS:
		parse_flags(python_LDFLAGS, 'PYEMBED', env)
	# try several locations for the python library, in order
	result = False
	name = 'python' + env['PYTHON_VERSION']
	if python_LIBDIR is not None:
		path = [python_LIBDIR]
		conf.log.write("\n\n# Trying LIBDIR: %r\n" % path)
		result = conf.check(lib=name, uselib='PYEMBED', libpath=path)
	if not result and python_LIBPL is not None:
		conf.log.write("\n\n# try again with -L$python_LIBPL (some systems don't install the python library in $prefix/lib)\n")
		path = [python_LIBPL]
		result = conf.check(lib=name, uselib='PYEMBED', libpath=path)
	if not result:
		conf.log.write("\n\n# try again with -L$prefix/libs, and pythonXY name rather than pythonX.Y (win32)\n")
		path = [os.path.join(python_prefix, "libs")]
		name = 'python' + env['PYTHON_VERSION'].replace('.', '')
		result = conf.check(lib=name, uselib='PYEMBED', libpath=path)
	if result:
		env['LIBPATH_PYEMBED'] = path
		env.append_value('LIB_PYEMBED', name)
	else:
		conf.log.write("\n\n### LIB NOT FOUND\n")
	# under certain conditions, python extensions must link to
	# python libraries, not just python embedding programs.
	if (sys.platform == 'win32' or sys.platform.startswith('os2')
		or sys.platform == 'darwin' or Py_ENABLE_SHARED):
		env['LIBPATH_PYEXT'] = env['LIBPATH_PYEMBED']
		env['LIB_PYEXT'] = env['LIB_PYEMBED']
	# We check that pythonX.Y-config exists, and if it exists we
	# use it to get only the includes, else fall back to distutils.
	python_config = conf.find_program(
		'python%s-config' % ('.'.join(env['PYTHON_VERSION'].split('.')[:2])),
		var='PYTHON_CONFIG')
	if not python_config:
		python_config = conf.find_program(
			'python-config-%s' % ('.'.join(env['PYTHON_VERSION'].split('.')[:2])),
			var='PYTHON_CONFIG')
	includes = []
	if python_config:
		for incstr in Utils.cmd_output("%s --includes" % (python_config,)).strip().split():
			# strip the -I or /I
			if (incstr.startswith('-I')
				or incstr.startswith('/I')):
				incstr = incstr[2:]
			# append include path, unless already given
			if incstr not in includes:
				includes.append(incstr)
		conf.log.write("Include path for Python extensions "
			"(found via python-config --includes): %r\n" % (includes,))
		env['CPPPATH_PYEXT'] = includes
		env['CPPPATH_PYEMBED'] = includes
	else:
		conf.log.write("Include path for Python extensions "
			"(found via distutils module): %r\n" % (INCLUDEPY,))
		env['CPPPATH_PYEXT'] = [INCLUDEPY]
		env['CPPPATH_PYEMBED'] = [INCLUDEPY]
	# Code using the Python API needs to be compiled with -fno-strict-aliasing
	if env['CC_NAME'] == 'gcc':
		env.append_value('CCFLAGS_PYEMBED', '-fno-strict-aliasing')
		env.append_value('CCFLAGS_PYEXT', '-fno-strict-aliasing')
	if env['CXX_NAME'] == 'gcc':
		env.append_value('CXXFLAGS_PYEMBED', '-fno-strict-aliasing')
		env.append_value('CXXFLAGS_PYEXT', '-fno-strict-aliasing')
	# See if it compiles
	conf.check(define_name='HAVE_PYTHON_H',
		uselib='PYEMBED', fragment=FRAG_2,
		errmsg='Could not find the python development headers', mandatory=mandatory)
@conf
def check_python_version(conf, minver=None):
	"""
	Check if the python interpreter is found matching a given minimum version.
	minver should be a tuple, eg. to check for python >= 2.4.2 pass (2,4,2) as minver.
	If successful, PYTHON_VERSION is defined as 'MAJOR.MINOR'
	(eg. '2.4') of the actual python version found, and PYTHONDIR is
	defined, pointing to the site-packages directory appropriate for
	this python version, where modules/packages/extensions should be
	installed.
	"""
	assert minver is None or isinstance(minver, tuple)
	python = conf.env['PYTHON']
	if not python:
		conf.fatal('could not find the python executable')
	# Get python version string
	cmd = [python, "-c", "import sys\nfor x in sys.version_info: print(str(x))"]
	debug('python: Running python command %r' % cmd)
	proc = Utils.pproc.Popen(cmd, stdout=Utils.pproc.PIPE, shell=False)
	lines = proc.communicate()[0].split()
	assert len(lines) == 5, "found %i lines, expected 5: %r" % (len(lines), lines)
	# (major, minor, micro, releaselevel, serial)
	pyver_tuple = (int(lines[0]), int(lines[1]), int(lines[2]), lines[3], int(lines[4]))
	# compare python version with the minimum required
	result = (minver is None) or (pyver_tuple >= minver)
	if result:
		# define useful environment variables
		pyver = '.'.join([str(x) for x in pyver_tuple[:2]])
		conf.env['PYTHON_VERSION'] = pyver
		if 'PYTHONDIR' in conf.environ:
			# explicit override from the environment wins
			pydir = conf.environ['PYTHONDIR']
		else:
			if sys.platform == 'win32':
				(python_LIBDEST, pydir) = \
					_get_python_variables(python,
						["get_config_var('LIBDEST') or ''",
						 "get_python_lib(standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']],
						['from distutils.sysconfig import get_config_var, get_python_lib'])
			else:
				python_LIBDEST = None
				(pydir,) = \
					_get_python_variables(python,
						["get_python_lib(standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']],
						['from distutils.sysconfig import get_config_var, get_python_lib'])
			if python_LIBDEST is None:
				# fall back to $LIBDIR/pythonX.Y or $PREFIX/lib/pythonX.Y
				if conf.env['LIBDIR']:
					python_LIBDEST = os.path.join(conf.env['LIBDIR'], "python" + pyver)
				else:
					python_LIBDEST = os.path.join(conf.env['PREFIX'], "lib", "python" + pyver)
		if 'PYTHONARCHDIR' in conf.environ:
			pyarchdir = conf.environ['PYTHONARCHDIR']
		else:
			# platform-specific site-packages for extension modules
			(pyarchdir,) = _get_python_variables(python,
				["get_python_lib(plat_specific=1, standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']],
				['from distutils.sysconfig import get_config_var, get_python_lib'])
			if not pyarchdir:
				pyarchdir = pydir
		if hasattr(conf, 'define'): # conf.define is added by the C tool, so may not exist
			conf.define('PYTHONDIR', pydir)
			conf.define('PYTHONARCHDIR', pyarchdir)
		conf.env['PYTHONDIR'] = pydir
	# Feedback
	pyver_full = '.'.join(map(str, pyver_tuple[:3]))
	if minver is None:
		conf.check_message_custom('Python version', '', pyver_full)
	else:
		minver_str = '.'.join(map(str, minver))
		conf.check_message('Python version', ">= %s" % minver_str, result, option=pyver_full)
	if not result:
		conf.fatal('The python version is too old (%r)' % pyver_full)
@conf
def check_python_module(conf, module_name):
	"""
	Check if the selected python interpreter can import the given python module.
	"""
	cmd = [conf.env['PYTHON'], "-c", "import %s" % module_name]
	proc = Utils.pproc.Popen(cmd, stderr=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE)
	result = not proc.wait()
	conf.check_message('Python module', module_name, result)
	if not result:
		conf.fatal('Could not find the python module %r' % module_name)
def detect(conf):
	# tool entry point: locate the python interpreter and set the default
	# flags for byte-compilation
	if not conf.env.PYTHON:
		conf.env.PYTHON = sys.executable
	python = conf.find_program('python', var='PYTHON')
	if not python:
		conf.fatal('Could not find the path of the python executable')
	if conf.env.PYTHON != sys.executable:
		warn("python executable '%s' different from sys.executable '%s'" % (conf.env.PYTHON, sys.executable))
	v = conf.env
	v['PYCMD'] = '"import sys, py_compile;py_compile.compile(sys.argv[1], sys.argv[2])"'
	v['PYFLAGS'] = ''
	v['PYFLAGS_OPT'] = '-O'
	# install .pyc/.pyo unless --nopyc/--nopyo were given
	v['PYC'] = getattr(Options.options, 'pyc', 1)
	v['PYO'] = getattr(Options.options, 'pyo', 1)
def set_options(opt):
	# command-line flags controlling the installation of byte-compiled files
	opt.add_option('--nopyc',
		action='store_false',
		default=1,
		help = 'Do not install bytecode compiled .pyc files (configuration) [Default:install]',
		dest = 'pyc')
	opt.add_option('--nopyo',
		action='store_false',
		default=1,
		help='Do not install optimised compiled .pyo files (configuration) [Default:install]',
		dest='pyo')
ntdb-1.0/buildtools/wafadmin/Tools/qt4.py 0000664 0000000 0000000 00000034520 12241515307 0020435 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
"""
Qt4 support
If QT4_ROOT is given (absolute path), the configuration will look in it first
This module also demonstrates how to add tasks dynamically (when the build has started)
"""
try:
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
except ImportError:
has_xml = False
ContentHandler = object
else:
has_xml = True
import os, sys
import ccroot, cxx
import TaskGen, Task, Utils, Runner, Options, Node, Configure
from TaskGen import taskgen, feature, after, extension
from Logs import error
from Constants import *
MOC_H = ['.h', '.hpp', '.hxx', '.hh']
EXT_RCC = ['.qrc']
EXT_UI = ['.ui']
EXT_QT4 = ['.cpp', '.cc', '.cxx', '.C']
class qxx_task(Task.Task):
	"A cpp task that may create a moc task dynamically"
	before = ['cxx_link', 'static_link']
	def __init__(self, *k, **kw):
		Task.Task.__init__(self, *k, **kw)
		self.moc_done = 0  # set to 1 once the moc tasks have been created
	def scan(self):
		# regular c++ dependency scan, with '.moc' entries moved from the
		# node list to the name list
		(nodes, names) = ccroot.scan(self)
		# for some reasons (variants) the moc node may end in the list of node deps
		# NOTE(review): list is mutated while iterated -- kept as-is
		for x in nodes:
			if x.name.endswith('.moc'):
				nodes.remove(x)
				names.append(x.relpath_gen(self.inputs[0].parent))
		return (nodes, names)
	def runnable_status(self):
		if self.moc_done:
			# if there is a moc task, delay the computation of the file signature
			for t in self.run_after:
				if not t.hasrun:
					return ASK_LATER
			# the moc file enters in the dependency calculation
			# so we need to recompute the signature when the moc file is present
			self.signature()
			return Task.Task.runnable_status(self)
		else:
			# yes, really, there are people who generate cxx files
			for t in self.run_after:
				if not t.hasrun:
					return ASK_LATER
			self.add_moc_tasks()
			return ASK_LATER
	def add_moc_tasks(self):
		# inspect the raw dependencies for '.moc' references and create one
		# moc task per entry, scheduled to run before this compilation
		node = self.inputs[0]
		tree = node.__class__.bld
		try:
			# compute the signature once to know if there is a moc file to create
			self.signature()
		except KeyError:
			# the moc file may be referenced somewhere else
			pass
		else:
			# remove the signature, it must be recomputed with the moc task
			delattr(self, 'cache_sig')
		moctasks=[]
		mocfiles=[]
		variant = node.variant(self.env)
		try:
			tmp_lst = tree.raw_deps[self.unique_id()]
			tree.raw_deps[self.unique_id()] = []
		except KeyError:
			tmp_lst = []
		for d in tmp_lst:
			if not d.endswith('.moc'): continue
			# paranoid check
			if d in mocfiles:
				error("paranoia owns")
				continue
			# process that base.moc only once
			mocfiles.append(d)
			# find the extension (performed only when the .cpp has changes)
			base2 = d[:-4]
			for path in [node.parent] + self.generator.env['INC_PATHS']:
				tree.rescan(path)
				vals = getattr(Options.options, 'qt_header_ext', '') or MOC_H
				for ex in vals:
					h_node = path.find_resource(base2 + ex)
					if h_node:
						break
				else:
					# no header with any known extension in this path
					continue
				break
			else:
				raise Utils.WafError("no header found for %s which is a moc file" % str(d))
			m_node = h_node.change_ext('.moc')
			tree.node_deps[(self.inputs[0].parent.id, self.env.variant(), m_node.name)] = h_node
			# create the task
			task = Task.TaskBase.classes['moc'](self.env, normal=0)
			task.set_inputs(h_node)
			task.set_outputs(m_node)
			generator = tree.generator
			generator.outstanding.insert(0, task)
			generator.total += 1
			moctasks.append(task)
		# remove raw deps except the moc files to save space (optimization)
		tmp_lst = tree.raw_deps[self.unique_id()] = mocfiles
		# look at the file inputs, it is set right above
		lst = tree.node_deps.get(self.unique_id(), ())
		for d in lst:
			name = d.name
			if name.endswith('.moc'):
				task = Task.TaskBase.classes['moc'](self.env, normal=0)
				task.set_inputs(tree.node_deps[(self.inputs[0].parent.id, self.env.variant(), name)]) # 1st element in a tuple
				task.set_outputs(d)
				generator = tree.generator
				generator.outstanding.insert(0, task)
				generator.total += 1
				moctasks.append(task)
		# simple scheduler dependency: run the moc task before others
		self.run_after = moctasks
		self.moc_done = 1
	# reuse the plain c++ compilation 'run' method unchanged
	run = Task.TaskBase.classes['cxx'].__dict__['run']
def translation_update(task):
	"""Run lupdate to refresh the .ts translation files from the sources."""
	lupdate = task.env['QT_LUPDATE']
	targets = " ".join([a.abspath(task.env) for a in task.outputs])
	for node in task.inputs:
		source = node.abspath(task.env)
		cmd = "%s %s -ts %s" % (lupdate, source, targets)
		Utils.pprint('BLUE', cmd)
		task.generator.bld.exec_command(cmd)
class XMLHandler(ContentHandler):
	"""SAX content handler collecting the file entries of a .qrc resource file."""
	def __init__(self):
		# text chunks of the element currently being parsed
		self.buf = []
		# complete file names collected so far
		self.files = []
	def startElement(self, name, attrs):
		# a new file entry begins: reset the character buffer
		if name == 'file':
			self.buf = []
	def endElement(self, name):
		# a file entry ends: join the buffered text into one name
		if name == 'file':
			self.files.append(''.join(self.buf))
	def characters(self, cars):
		self.buf.append(cars)
def scan(self):
	"add the dependency on the files referenced in the qrc"
	# parses the qrc file with SAX and resolves each referenced file
	# against the qrc's directory; attached later as the rcc task's scan
	node = self.inputs[0]
	parser = make_parser()
	curHandler = XMLHandler()
	parser.setContentHandler(curHandler)
	fi = open(self.inputs[0].abspath(self.env))
	parser.parse(fi)
	fi.close()
	nodes = []
	names = []
	root = self.inputs[0].parent
	for x in curHandler.files:
		nd = root.find_resource(x)
		if nd: nodes.append(nd)
		else: names.append(x)
	return (nodes, names)
@extension(EXT_RCC)
def create_rcc_task(self, node):
	"hook for rcc files"
	# the .qrc is first compiled to a _rc.cpp, which is then compiled as c++
	generated = node.change_ext('_rc.cpp')
	self.create_task('rcc', node, generated)
	compile_task = self.create_task('cxx', generated, generated.change_ext('.o'))
	self.compiled_tasks.append(compile_task)
	return compile_task
@extension(EXT_UI)
def create_uic_task(self, node):
	"hook for uic tasks"
	task = self.create_task('ui4', node)
	# the generated header name follows the configured ui_PATTERN
	out = self.path.find_or_declare(self.env['ui_PATTERN'] % node.name[:-3])
	task.outputs = [out]
	return task
class qt4_taskgen(cxx.cxx_taskgen):
	# a c++ task generator with the 'qt4' feature enabled by default
	def __init__(self, *k, **kw):
		cxx.cxx_taskgen.__init__(self, *k, **kw)
		self.features.append('qt4')
@extension('.ts')
def add_lang(self, node):
	"""add all the .ts file into self.lang"""
	existing = self.to_list(getattr(self, 'lang', []))
	self.lang = existing + [node]
@feature('qt4')
@after('apply_link')
def apply_qt4(self):
	# Process the translation files (.ts -> .qm), optionally bundle the .qm
	# files into a generated .qrc resource, and propagate the c++ flags to moc.
	if getattr(self, 'lang', None):
		update = getattr(self, 'update', None)
		lst=[]
		trans=[]
		for l in self.to_list(self.lang):
			if not isinstance(l, Node.Node):
				l = self.path.find_resource(l+'.ts')
			t = self.create_task('ts2qm', l, l.change_ext('.qm'))
			lst.append(t.outputs[0])
			if update:
				trans.append(t.inputs[0])
		trans_qt4 = getattr(Options.options, 'trans_qt4', False)
		if update and trans_qt4:
			# we need the cpp files given, except the rcc task we create after
			# FIXME may be broken
			u = Task.TaskCmd(translation_update, self.env, 2)
			u.inputs = [a.inputs[0] for a in self.compiled_tasks]
			u.outputs = trans
		if getattr(self, 'langname', None):
			# bundle the compiled .qm files into a qrc and link it in
			t = Task.TaskBase.classes['qm2rcc'](self.env)
			t.set_inputs(lst)
			t.set_outputs(self.path.find_or_declare(self.langname+'.qrc'))
			t.path = self.path
			k = create_rcc_task(self, t.outputs[0])
			self.link_task.inputs.append(k.outputs[0])
	# moc needs the same defines and include paths as the c++ compiler
	self.env.append_value('MOC_FLAGS', self.env._CXXDEFFLAGS)
	self.env.append_value('MOC_FLAGS', self.env._CXXINCFLAGS)
@extension(EXT_QT4)
def cxx_hook(self, node):
	"""Compile a qt4 source file with the moc-aware 'qxx' task."""
	if hasattr(self, 'obj_ext'):
		obj_ext = self.obj_ext
	else:
		obj_ext = '_%d.o' % self.idx
	tsk = self.create_task('qxx', node, node.change_ext(obj_ext))
	self.compiled_tasks.append(tsk)
	return tsk
def process_qm2rcc(task):
	# Write a Qt resource (.qrc) file listing the compiled .qm translations.
	# NOTE(review): the string literals below look like they once contained
	# the .qrc XML markup (<RCC>/<qresource> tags) and were stripped by a
	# markup-eating tool -- confirm against the original qt4 waf tool.
	outfile = task.outputs[0].abspath(task.env)
	f = open(outfile, 'w')
	f.write('\n\n')
	for k in task.inputs:
		f.write(' ')
		#f.write(k.name)
		f.write(k.path_to_parent(task.path))
		f.write('\n')
	f.write('\n')
	f.close()
# Register the qt4 task types: command templates, colors and run ordering.
b = Task.simple_task_type
b('moc', '${QT_MOC} ${MOC_FLAGS} ${SRC} ${MOC_ST} ${TGT}', color='BLUE', vars=['QT_MOC', 'MOC_FLAGS'], shell=False)
cls = b('rcc', '${QT_RCC} -name ${SRC[0].name} ${SRC[0].abspath(env)} ${RCC_ST} -o ${TGT}', color='BLUE', before='cxx moc qxx_task', after="qm2rcc", shell=False)
cls.scan = scan
b('ui4', '${QT_UIC} ${SRC} -o ${TGT}', color='BLUE', before='cxx moc qxx_task', shell=False)
b('ts2qm', '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}', color='BLUE', before='qm2rcc', shell=False)
Task.task_type_from_func('qm2rcc', vars=[], func=process_qm2rcc, color='BLUE', before='rcc', after='ts2qm')
def detect_qt4(conf):
	# Configure-time detection of the Qt4 toolchain: locate qmake, query it
	# for install paths, find uic/moc/rcc/lrelease/lupdate, then discover
	# the Qt libraries either via pkg-config or by probing the filesystem.
	env = conf.env
	opt = Options.options

	qtdir = getattr(opt, 'qtdir', '')
	qtbin = getattr(opt, 'qtbin', '')
	qtlibs = getattr(opt, 'qtlibs', '')
	# NOTE(review): useframework is read but never used in this function
	useframework = getattr(opt, 'use_qt4_osxframework', True)

	paths = []

	# the path to qmake has been given explicitly
	if qtbin:
		paths = [qtbin]

	# the qt directory has been given - we deduce the qt binary path
	if not qtdir:
		qtdir = conf.environ.get('QT4_ROOT', '')
		qtbin = os.path.join(qtdir, 'bin')
		paths = [qtbin]

	# no qtdir, look in the path and in /usr/local/Trolltech
	if not qtdir:
		paths = os.environ.get('PATH', '').split(os.pathsep)
		paths.append('/usr/share/qt4/bin/')
		try:
			lst = os.listdir('/usr/local/Trolltech/')
		except OSError:
			pass
		else:
			if lst:
				lst.sort()
				lst.reverse()
				# keep the highest version
				qtdir = '/usr/local/Trolltech/%s/' % lst[0]
				qtbin = os.path.join(qtdir, 'bin')
				paths.append(qtbin)

	# at the end, try to find qmake in the paths given
	# keep the one with the highest version
	cand = None
	prev_ver = ['4', '0', '0']
	for qmk in ['qmake-qt4', 'qmake4', 'qmake']:
		qmake = conf.find_program(qmk, path_list=paths)
		if qmake:
			try:
				version = Utils.cmd_output([qmake, '-query', 'QT_VERSION']).strip()
			except ValueError:
				pass
			else:
				if version:
					# NOTE(review): lexicographic list comparison, not a
					# numeric one -- '10' would sort before '9'
					new_ver = version.split('.')
					if new_ver > prev_ver:
						cand = qmake
						prev_ver = new_ver
	if cand:
		qmake = cand
	else:
		conf.fatal('could not find qmake for qt4')

	conf.env.QMAKE = qmake
	qtincludes = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_HEADERS']).strip()
	qtdir = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_PREFIX']).strip() + os.sep
	qtbin = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_BINS']).strip() + os.sep

	if not qtlibs:
		try:
			qtlibs = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_LIBS']).strip() + os.sep
		except ValueError:
			qtlibs = os.path.join(qtdir, 'lib')

	def find_bin(lst, var):
		# store the first of the candidate program names found in env[var]
		for f in lst:
			ret = conf.find_program(f, path_list=paths)
			if ret:
				env[var]=ret
				break

	vars = "QtCore QtGui QtUiTools QtNetwork QtOpenGL QtSql QtSvg QtTest QtXml QtWebKit Qt3Support".split()

	find_bin(['uic-qt3', 'uic3'], 'QT_UIC3')
	find_bin(['uic-qt4', 'uic'], 'QT_UIC')
	if not env['QT_UIC']:
		conf.fatal('cannot find the uic compiler for qt4')

	try:
		version = Utils.cmd_output(env['QT_UIC'] + " -version 2>&1").strip()
	except ValueError:
		conf.fatal('your uic compiler is for qt3, add uic for qt4 to your path')

	version = version.replace('Qt User Interface Compiler ','')
	version = version.replace('User Interface Compiler for Qt', '')
	if version.find(" 3.") != -1:
		conf.check_message('uic version', '(too old)', 0, option='(%s)'%version)
		sys.exit(1)
	conf.check_message('uic version', '', 1, option='(%s)'%version)

	find_bin(['moc-qt4', 'moc'], 'QT_MOC')
	find_bin(['rcc'], 'QT_RCC')
	find_bin(['lrelease-qt4', 'lrelease'], 'QT_LRELEASE')
	find_bin(['lupdate-qt4', 'lupdate'], 'QT_LUPDATE')

	env['UIC3_ST']= '%s -o %s'
	env['UIC_ST'] = '%s -o %s'
	env['MOC_ST'] = '-o'
	env['ui_PATTERN'] = 'ui_%s.h'
	env['QT_LRELEASE_FLAGS'] = ['-silent']

	vars_debug = [a+'_debug' for a in vars]

	try:
		conf.find_program('pkg-config', var='pkgconfig', path_list=paths, mandatory=True)
	except Configure.ConfigurationError:
		# no pkg-config: probe qtlibs/qtbin for the library files directly
		for lib in vars_debug+vars:
			uselib = lib.upper()

			d = (lib.find('_debug') > 0) and 'd' or ''

			# original author seems to prefer static to shared libraries
			for (pat, kind) in ((conf.env.staticlib_PATTERN, 'STATIC'), (conf.env.shlib_PATTERN, '')):

				conf.check_message_1('Checking for %s %s' % (lib, kind))

				for ext in ['', '4']:
					path = os.path.join(qtlibs, pat % (lib + d + ext))
					if os.path.exists(path):
						env.append_unique(kind + 'LIB_' + uselib, lib + d + ext)
						conf.check_message_2('ok ' + path, 'GREEN')
						break
					path = os.path.join(qtbin, pat % (lib + d + ext))
					if os.path.exists(path):
						env.append_unique(kind + 'LIB_' + uselib, lib + d + ext)
						conf.check_message_2('ok ' + path, 'GREEN')
						break
				else:
					conf.check_message_2('not found', 'YELLOW')
					continue
				break

			env.append_unique('LIBPATH_' + uselib, qtlibs)
			env.append_unique('CPPPATH_' + uselib, qtincludes)
			env.append_unique('CPPPATH_' + uselib, qtincludes + os.sep + lib)
	else:
		# pkg-config available: let it provide cflags/libs for each module
		for i in vars_debug+vars:
			try:
				conf.check_cfg(package=i, args='--cflags --libs --silence-errors', path=conf.env.pkgconfig)
			except ValueError:
				pass

	# the libpaths are set nicely, unfortunately they make really long command-lines
	# remove the qtcore ones from qtgui, etc
	def process_lib(vars_, coreval):
		for d in vars_:
			var = d.upper()
			if var == 'QTCORE': continue

			value = env['LIBPATH_'+var]
			if value:
				core = env[coreval]
				accu = []
				for lib in value:
					if lib in core: continue
					accu.append(lib)
				env['LIBPATH_'+var] = accu

	process_lib(vars, 'LIBPATH_QTCORE')
	process_lib(vars_debug, 'LIBPATH_QTCORE_DEBUG')

	# rpath if wanted
	want_rpath = getattr(Options.options, 'want_rpath', 1)
	if want_rpath:
		def process_rpath(vars_, coreval):
			# add -Wl,--rpath entries for every non-QtCore library path
			for d in vars_:
				var = d.upper()

				value = env['LIBPATH_'+var]
				if value:
					core = env[coreval]
					accu = []
					for lib in value:
						if var != 'QTCORE':
							if lib in core:
								continue
						accu.append('-Wl,--rpath='+lib)
					env['RPATH_'+var] = accu
		process_rpath(vars, 'LIBPATH_QTCORE')
		process_rpath(vars_debug, 'LIBPATH_QTCORE_DEBUG')

	env['QTLOCALE'] = str(env['PREFIX'])+'/share/locale'
def detect(conf):
	# waf tool entry point: delegate to the qt4 detection routine above
	detect_qt4(conf)
def set_options(opt):
	# Command-line options for configuring qt4 support
	# (rpath, moc header extension, qt install dirs, OS X framework, i18n).
	opt.add_option('--want-rpath', type='int', default=1, dest='want_rpath', help='set rpath to 1 or 0 [Default 1]')
	opt.add_option('--header-ext',
		type='string',
		default='',
		help='header extension for moc files',
		dest='qt_header_ext')
	for i in 'qtdir qtbin qtlibs'.split():
		opt.add_option('--'+i, type='string', default='', dest=i)
	if sys.platform == "darwin":
		opt.add_option('--no-qt4-framework', action="store_false", help='do not use the framework version of Qt4 in OS X', dest='use_qt4_osxframework',default=True)
	opt.add_option('--translate', action="store_true", help="collect translation strings", dest="trans_qt4", default=False)
ntdb-1.0/buildtools/wafadmin/Tools/ruby.py 0000664 0000000 0000000 00000007277 12241515307 0020717 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# daniel.svensson at purplescout.se 2008
import os
import Task, Options, Utils
from TaskGen import before, feature, after
from Configure import conf
@feature('rubyext')
@before('apply_incpaths', 'apply_type_vars', 'apply_lib_vars', 'apply_bundle')
@after('default_cc', 'vars_target_cshlib')
def init_rubyext(self):
	"""Prepare a ruby extension target: default install path and uselib vars."""
	self.default_install_path = '${ARCHDIR_RUBY}'
	self.uselib = self.to_list(getattr(self, 'uselib', ''))
	for name in ('RUBY', 'RUBYEXT'):
		if name not in self.uselib:
			self.uselib.append(name)
@feature('rubyext')
@before('apply_link')
def apply_ruby_so_name(self):
	"""Name the shared object the way the ruby interpreter expects."""
	pattern = self.env['rubyext_PATTERN']
	self.env['shlib_PATTERN'] = pattern
@conf
def check_ruby_version(conf, minver=()):
	"""
	Check that ruby is installed and (optionally) at least version `minver`.

	Sets conf.env.RUBY (binary path, overridable with --with-ruby-binary)
	and conf.env.RUBY_VERSION.  Calls conf.fatal() on any failure.
	"""
	if Options.options.rubybinary:
		conf.env.RUBY = Options.options.rubybinary
	else:
		conf.find_program("ruby", var="RUBY", mandatory=True)

	ruby = conf.env.RUBY

	try:
		version = Utils.cmd_output([ruby, '-e', 'puts defined?(VERSION) ? VERSION : RUBY_VERSION']).strip()
	except Exception:
		# was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit propagate
		conf.fatal('could not determine ruby version')
	conf.env.RUBY_VERSION = version

	try:
		ver = tuple(map(int, version.split(".")))
	except Exception:
		# non-numeric component in the reported version string
		conf.fatal('unsupported ruby version %r' % version)

	cver = ''
	if minver:
		if ver < minver:
			conf.fatal('ruby is too old')
		cver = ".".join([str(x) for x in minver])
	conf.check_message('ruby', cver, True, version)
@conf
def check_ruby_ext_devel(conf):
	# Check that everything needed to build ruby C extensions is present and
	# fill conf.env with include/lib paths and flags queried from rbconfig.
	if not conf.env.RUBY:
		conf.fatal('ruby detection is required first')

	if not conf.env.CC_NAME and not conf.env.CXX_NAME:
		conf.fatal('load a c/c++ compiler first')

	version = tuple(map(int, conf.env.RUBY_VERSION.split(".")))

	def read_out(cmd):
		# run a one-liner under the detected ruby with rbconfig preloaded
		return Utils.to_list(Utils.cmd_output([conf.env.RUBY, '-rrbconfig', '-e', cmd]))

	def read_config(key):
		return read_out('puts Config::CONFIG[%r]' % key)

	ruby = conf.env['RUBY']
	archdir = read_config('archdir')
	cpppath = archdir
	if version >= (1, 9, 0):
		# ruby >= 1.9 moved the public headers out of archdir
		ruby_hdrdir = read_config('rubyhdrdir')
		cpppath += ruby_hdrdir
		cpppath += [os.path.join(ruby_hdrdir[0], read_config('arch')[0])]

	conf.check(header_name='ruby.h', includes=cpppath, mandatory=True, errmsg='could not find ruby header file')

	conf.env.LIBPATH_RUBYEXT = read_config('libdir')
	conf.env.LIBPATH_RUBYEXT += archdir
	conf.env.CPPPATH_RUBYEXT = cpppath
	conf.env.CCFLAGS_RUBYEXT = read_config("CCDLFLAGS")
	conf.env.rubyext_PATTERN = '%s.' + read_config('DLEXT')[0]

	# ok this is really stupid, but the command and flags are combined.
	# so we try to find the first argument...
	flags = read_config('LDSHARED')
	while flags and flags[0][0] != '-':
		flags = flags[1:]

	# we also want to strip out the deprecated ppc flags
	if len(flags) > 1 and flags[1] == "ppc":
		flags = flags[2:]

	conf.env.LINKFLAGS_RUBYEXT = flags
	conf.env.LINKFLAGS_RUBYEXT += read_config("LIBS")
	conf.env.LINKFLAGS_RUBYEXT += read_config("LIBRUBYARG_SHARED")

	# install locations, overridable from the command line
	if Options.options.rubyarchdir:
		conf.env.ARCHDIR_RUBY = Options.options.rubyarchdir
	else:
		conf.env.ARCHDIR_RUBY = read_config('sitearchdir')[0]

	if Options.options.rubylibdir:
		conf.env.LIBDIR_RUBY = Options.options.rubylibdir
	else:
		conf.env.LIBDIR_RUBY = read_config('sitelibdir')[0]
def set_options(opt):
	# Command-line overrides for ruby install dirs and the ruby binary.
	opt.add_option('--with-ruby-archdir', type='string', dest='rubyarchdir', help='Specify directory where to install arch specific files')
	opt.add_option('--with-ruby-libdir', type='string', dest='rubylibdir', help='Specify alternate ruby library path')
	opt.add_option('--with-ruby-binary', type='string', dest='rubybinary', help='Specify alternate ruby binary')
ntdb-1.0/buildtools/wafadmin/Tools/suncc.py 0000664 0000000 0000000 00000003522 12241515307 0021036 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
# Ralf Habacker, 2006 (rh)
import os, optparse
import Utils, Options, Configure
import ccroot, ar
from Configure import conftest
@conftest
def find_scc(conf):
	"""Locate the Sun C compiler and verify that it responds to -flags."""
	v = conf.env
	cc = None
	if v['CC']:
		cc = v['CC']
	elif 'CC' in conf.environ:
		cc = conf.environ['CC']
	#if not cc: cc = conf.find_program('gcc', var='CC')
	if not cc:
		cc = conf.find_program('cc', var='CC')
	if not cc:
		conf.fatal('suncc was not found')
	cc = conf.cmd_to_list(cc)
	try:
		out = Utils.cmd_output(cc + ['-flags'])
	except ValueError:
		conf.fatal('suncc -flags could not be executed')
	else:
		if not out:
			conf.fatal('suncc %r was not found' % cc)
	v['CC'] = cc
	v['CC_NAME'] = 'sun'
@conftest
def scc_common_flags(conf):
	# Fill conf.env with the command-line fragments used to drive the Sun
	# C compiler and linker (flag templates, file patterns).
	v = conf.env

	# CPPFLAGS CCDEFINES _CCINCFLAGS _CCDEFFLAGS
	v['CC_SRC_F'] = ''
	v['CC_TGT_F'] = ['-c', '-o', '']
	v['CPPPATH_ST'] = '-I%s' # template for adding include paths

	# linker
	if not v['LINK_CC']: v['LINK_CC'] = v['CC']
	v['CCLNK_SRC_F'] = ''
	v['CCLNK_TGT_F'] = ['-o', ''] # solaris hack, separate the -o from the target

	v['LIB_ST'] = '-l%s' # template for adding libs
	v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
	v['STATICLIB_ST'] = '-l%s'
	v['STATICLIBPATH_ST'] = '-L%s'
	v['CCDEFINES_ST'] = '-D%s'

	v['SONAME_ST'] = '-Wl,-h -Wl,%s'
	v['SHLIB_MARKER'] = '-Bdynamic'
	v['STATICLIB_MARKER'] = '-Bstatic'

	# program
	v['program_PATTERN'] = '%s'

	# shared library
	v['shlib_CCFLAGS'] = ['-Kpic', '-DPIC']
	v['shlib_LINKFLAGS'] = ['-G']
	v['shlib_PATTERN'] = 'lib%s.so'

	# static lib
	v['staticlib_LINKFLAGS'] = ['-Bstatic']
	v['staticlib_PATTERN'] = 'lib%s.a'
# conftest methods executed, in order, by conf.check_tool('suncc')
detect = '''
find_scc
find_cpp
find_ar
scc_common_flags
cc_load_tools
cc_add_flags
link_add_flags
'''
ntdb-1.0/buildtools/wafadmin/Tools/suncxx.py 0000664 0000000 0000000 00000003460 12241515307 0021254 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
# Ralf Habacker, 2006 (rh)
import os, optparse
import Utils, Options, Configure
import ccroot, ar
from Configure import conftest
@conftest
def find_sxx(conf):
	"""Locate the Sun C++ compiler and verify that it responds to -flags."""
	v = conf.env
	cc = None
	if v['CXX']:
		cc = v['CXX']
	elif 'CXX' in conf.environ:
		cc = conf.environ['CXX']
	if not cc:
		cc = conf.find_program('c++', var='CXX')
	if not cc:
		conf.fatal('sunc++ was not found')
	cc = conf.cmd_to_list(cc)
	try:
		out = Utils.cmd_output(cc + ['-flags'])
	except ValueError:
		conf.fatal('sunc++ -flags could not be executed')
	else:
		if not out:
			conf.fatal('sunc++ %r was not found' % cc)
	v['CXX'] = cc
	v['CXX_NAME'] = 'sun'
@conftest
def sxx_common_flags(conf):
	# Fill conf.env with the command-line fragments used to drive the Sun
	# C++ compiler and linker (flag templates, file patterns).
	v = conf.env

	# CPPFLAGS CXXDEFINES _CXXINCFLAGS _CXXDEFFLAGS
	v['CXX_SRC_F'] = ''
	v['CXX_TGT_F'] = ['-c', '-o', '']
	v['CPPPATH_ST'] = '-I%s' # template for adding include paths

	# linker
	if not v['LINK_CXX']: v['LINK_CXX'] = v['CXX']
	v['CXXLNK_SRC_F'] = ''
	v['CXXLNK_TGT_F'] = ['-o', ''] # solaris hack, separate the -o from the target

	v['LIB_ST'] = '-l%s' # template for adding libs
	v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
	v['STATICLIB_ST'] = '-l%s'
	v['STATICLIBPATH_ST'] = '-L%s'
	v['CXXDEFINES_ST'] = '-D%s'

	v['SONAME_ST'] = '-Wl,-h -Wl,%s'
	v['SHLIB_MARKER'] = '-Bdynamic'
	v['STATICLIB_MARKER'] = '-Bstatic'

	# program
	v['program_PATTERN'] = '%s'

	# shared library
	v['shlib_CXXFLAGS'] = ['-Kpic', '-DPIC']
	v['shlib_LINKFLAGS'] = ['-G']
	v['shlib_PATTERN'] = 'lib%s.so'

	# static lib
	v['staticlib_LINKFLAGS'] = ['-Bstatic']
	v['staticlib_PATTERN'] = 'lib%s.a'
# conftest methods executed, in order, by conf.check_tool('suncxx')
detect = '''
find_sxx
find_cpp
find_ar
sxx_common_flags
cxx_load_tools
cxx_add_flags
link_add_flags
'''
ntdb-1.0/buildtools/wafadmin/Tools/tex.py 0000664 0000000 0000000 00000016102 12241515307 0020521 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
"TeX/LaTeX/PDFLaTeX support"
import os, re
import Utils, TaskGen, Task, Runner, Build
from TaskGen import feature, before
from Logs import error, warn, debug
re_tex = re.compile(r'\\(?Pinclude|input|import|bringin|lstinputlisting){(?P[^{}]*)}', re.M)
def scan(self):
	# Dependency scanner for TeX sources: returns (nodes, names) where
	# nodes are resolved dependency files and names are unresolved ones.
	node = self.inputs[0]
	env = self.env

	nodes = []
	names = []
	if not node: return (nodes, names)
	code = Utils.readf(node.abspath(env))

	curdirnode = self.curdirnode
	abs = curdirnode.abspath()
	for match in re_tex.finditer(code):
		path = match.group('file')
		if path:
			# try the bare name and the common tex extensions
			for k in ['', '.tex', '.ltx']:
				# add another loop for the tex include paths?
				debug('tex: trying %s%s' % (path, k))
				try:
					os.stat(abs+os.sep+path+k)
				except OSError:
					continue
				found = path+k
				node = curdirnode.find_resource(found)
				if node:
					nodes.append(node)
			# NOTE(review): indentation was lost in this copy; the else is
			# reconstructed as a for-else (runs when the loop is not broken)
			# -- confirm against the upstream waf tex.py
			else:
				debug('tex: could not find %s' % path)
				names.append(path)
	debug("tex: found the following : %s and names %s" % (nodes, names))
	return (nodes, names)
# Compiled command functions used by tex_build() below.
latex_fun, _ = Task.compile_fun('latex', '${LATEX} ${LATEXFLAGS} ${SRCFILE}', shell=False)
pdflatex_fun, _ = Task.compile_fun('pdflatex', '${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}', shell=False)
bibtex_fun, _ = Task.compile_fun('bibtex', '${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}', shell=False)
# fixed a copy-paste error: this function was registered under the name 'bibtex'
makeindex_fun, _ = Task.compile_fun('makeindex', '${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}', shell=False)

# presence of \bibdata in the .aux file means bibtex must be run
g_bibtex_re = re.compile('bibdata', re.M)
def tex_build(task, command='LATEX'):
	"""
	Build a TeX document: run latex/pdflatex, then bibtex and makeindex if
	their input files exist, then re-run the main command until the .aux
	file stops changing (at most 10 passes).

	command is 'LATEX' or 'PDFLATEX'; returns a non-zero exit status on
	failure, None on success.
	"""
	env = task.env
	bld = task.generator.bld

	if not env['PROMPT_LATEX']:
		env.append_value('LATEXFLAGS', '-interaction=batchmode')
		env.append_value('PDFLATEXFLAGS', '-interaction=batchmode')

	fun = latex_fun
	if command == 'PDFLATEX':
		fun = pdflatex_fun

	node = task.inputs[0]
	reldir = node.bld_dir(env)

	#lst = []
	#for c in Utils.split_path(reldir):
	#	if c: lst.append('..')
	#srcfile = os.path.join(*(lst + [node.srcpath(env)]))
	#sr2 = os.path.join(*(lst + [node.parent.srcpath(env)]))
	srcfile = node.abspath(env)
	sr2 = node.parent.abspath() + os.pathsep + node.parent.abspath(env) + os.pathsep

	aux_node = node.change_ext('.aux')
	idx_node = node.change_ext('.idx')

	nm = aux_node.name
	docuname = nm[ : len(nm) - 4 ] # 4 is the size of ".aux"

	# important, set the cwd for everybody
	task.cwd = task.inputs[0].parent.abspath(task.env)

	warn('first pass on %s' % command)

	task.env.env = {'TEXINPUTS': sr2}
	task.env.SRCFILE = srcfile
	ret = fun(task)
	if ret:
		return ret

	# look in the .aux file if there is a bibfile to process
	try:
		ct = Utils.readf(aux_node.abspath(env))
	except (OSError, IOError):
		error('error bibtex scan')
	else:
		fo = g_bibtex_re.findall(ct)

		# there is a .aux file to process
		if fo:
			warn('calling bibtex')

			task.env.env = {'BIBINPUTS': sr2, 'BSTINPUTS': sr2}
			task.env.SRCFILE = docuname
			ret = bibtex_fun(task)
			if ret:
				error('error when calling bibtex %s' % docuname)
				return ret

	# look on the filesystem if there is a .idx file to process
	try:
		idx_path = idx_node.abspath(env)
		os.stat(idx_path)
	except OSError:
		error('error file.idx scan')
	else:
		warn('calling makeindex')

		task.env.SRCFILE = idx_node.name
		task.env.env = {}
		ret = makeindex_fun(task)
		if ret:
			error('error when calling makeindex %s' % idx_path)
			return ret

	hash = ''
	i = 0
	while i < 10:
		# prevent against infinite loops - one never knows
		i += 1

		# watch the contents of file.aux
		prev_hash = hash
		try:
			hash = Utils.h_file(aux_node.abspath(env))
		except KeyError:
			error('could not read aux.h -> %s' % aux_node.abspath(env))
			pass

		# debug
		#print "hash is, ", hash, " ", old_hash

		# stop if file.aux does not change anymore
		if hash and hash == prev_hash:
			break

		# run the command
		warn('calling %s' % command)

		task.env.env = {'TEXINPUTS': sr2 + os.pathsep}
		task.env.SRCFILE = srcfile
		ret = fun(task)
		if ret:
			# fixed: this message referenced the undefined name
			# 'latex_compile_cmd', raising a NameError on the error path
			error('error when calling %s %s' % (command, srcfile))
			return ret

	return None # ok
# environment variables that trigger a rebuild of latex tasks when changed
latex_vardeps = ['LATEX', 'LATEXFLAGS']
def latex_build(task):
	# run the multi-pass build with the latex command
	return tex_build(task, 'LATEX')

# environment variables that trigger a rebuild of pdflatex tasks when changed
pdflatex_vardeps = ['PDFLATEX', 'PDFLATEXFLAGS']
def pdflatex_build(task):
	# run the multi-pass build with the pdflatex command
	return tex_build(task, 'PDFLATEX')
class tex_taskgen(TaskGen.task_gen):
	# Task generator for TeX documents; behavior comes from the 'tex'
	# feature method apply_tex below.
	def __init__(self, *k, **kw):
		TaskGen.task_gen.__init__(self, *k, **kw)
@feature('tex')
@before('apply_core')
def apply_tex(self):
	# Create the latex/pdflatex tasks for each source document, register the
	# manual dependencies, and chain the dvips/dvipdf/pdf2ps conversions
	# requested through the 'outs' attribute.
	if not getattr(self, 'type', None) in ['latex', 'pdflatex']:
		self.type = 'pdflatex'

	tree = self.bld
	outs = Utils.to_list(getattr(self, 'outs', []))

	# prompt for incomplete files (else the batchmode is used)
	self.env['PROMPT_LATEX'] = getattr(self, 'prompt', 1)

	deps_lst = []

	if getattr(self, 'deps', None):
		deps = self.to_list(self.deps)
		for filename in deps:
			n = self.path.find_resource(filename)
			# NOTE(review): if the file is missing, find_resource returns
			# None which is appended here -- TODO confirm intended
			if not n in deps_lst: deps_lst.append(n)

	self.source = self.to_list(self.source)
	for filename in self.source:
		base, ext = os.path.splitext(filename)

		node = self.path.find_resource(filename)
		if not node: raise Utils.WafError('cannot find %s' % filename)

		if self.type == 'latex':
			task = self.create_task('latex', node, node.change_ext('.dvi'))
		elif self.type == 'pdflatex':
			task = self.create_task('pdflatex', node, node.change_ext('.pdf'))

		task.env = self.env
		task.curdirnode = self.path

		# add the manual dependencies
		if deps_lst:
			variant = node.variant(self.env)
			try:
				lst = tree.node_deps[task.unique_id()]
				for n in deps_lst:
					if not n in lst:
						lst.append(n)
			except KeyError:
				tree.node_deps[task.unique_id()] = deps_lst

		if self.type == 'latex':
			if 'ps' in outs:
				tsk = self.create_task('dvips', task.outputs, node.change_ext('.ps'))
				tsk.env.env = {'TEXINPUTS' : node.parent.abspath() + os.pathsep + self.path.abspath() + os.pathsep + self.path.abspath(self.env)}
			if 'pdf' in outs:
				tsk = self.create_task('dvipdf', task.outputs, node.change_ext('.pdf'))
				tsk.env.env = {'TEXINPUTS' : node.parent.abspath() + os.pathsep + self.path.abspath() + os.pathsep + self.path.abspath(self.env)}
		elif self.type == 'pdflatex':
			if 'ps' in outs:
				self.create_task('pdf2ps', task.outputs, node.change_ext('.ps'))
	# the sources were all consumed as tex documents
	self.source = []
def detect(conf):
	# Find the TeX toolchain programs and initialize their FLAGS variables.
	v = conf.env
	for p in 'tex latex pdflatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps'.split():
		conf.find_program(p, var=p.upper())
		v[p.upper()+'FLAGS'] = ''
	v['DVIPSFLAGS'] = '-Ppdf'

# register the conversion task types and the main latex/pdflatex tasks
b = Task.simple_task_type
b('tex', '${TEX} ${TEXFLAGS} ${SRC}', color='BLUE', shell=False) # not used anywhere
b('bibtex', '${BIBTEX} ${BIBTEXFLAGS} ${SRC}', color='BLUE', shell=False) # not used anywhere
b('dvips', '${DVIPS} ${DVIPSFLAGS} ${SRC} -o ${TGT}', color='BLUE', after="latex pdflatex tex bibtex", shell=False)
b('dvipdf', '${DVIPDF} ${DVIPDFFLAGS} ${SRC} ${TGT}', color='BLUE', after="latex pdflatex tex bibtex", shell=False)
b('pdf2ps', '${PDF2PS} ${PDF2PSFLAGS} ${SRC} ${TGT}', color='BLUE', after="dvipdf pdflatex", shell=False)

b = Task.task_type_from_func
cls = b('latex', latex_build, vars=latex_vardeps)
cls.scan = scan
cls = b('pdflatex', pdflatex_build, vars=pdflatex_vardeps)
cls.scan = scan
ntdb-1.0/buildtools/wafadmin/Tools/unittestw.py 0000664 0000000 0000000 00000023077 12241515307 0022000 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2006
"""
Unit tests run in the shutdown() method, and for c/c++ programs
One should NOT have to give parameters to programs to execute
In the shutdown method, add the following code:
>>> def shutdown():
... ut = UnitTest.unit_test()
... ut.run()
... ut.print_results()
Each object to use as a unit test must be a program and must have X{obj.unit_test=1}
"""
import os, sys
import Build, TaskGen, Utils, Options, Logs, Task
from TaskGen import before, after, feature
from Constants import *
class unit_test(object):
	"Unit test representation"
	def __init__(self):
		self.returncode_ok = 0 # Unit test returncode considered OK. All returncodes differing from this one
		# will cause the unit test to be marked as "FAILED".

		# The following variables are filled with data by run().
		# print_results() uses these for printing the unit test summary,
		# but if there is need for direct access to the results,
		# they can be retrieved here, after calling run().
		self.num_tests_ok = 0 # Number of successful unit tests
		self.num_tests_failed = 0 # Number of failed unit tests
		self.num_tests_err = 0 # Tests that have not even run
		self.total_num_tests = 0 # Total amount of unit tests
		self.max_label_length = 0 # Maximum label length (pretty-print the output)

		self.unit_tests = Utils.ordered_dict() # Unit test dictionary. Key: the label (unit test filename relative
		# to the build dir), value: unit test filename with absolute path
		self.unit_test_results = {} # Dictionary containing the unit test results.
		# Key: the label, value: result (true = success false = failure)
		self.unit_test_erroneous = {} # Dictionary indicating erroneous unit tests.
		# Key: the label, value: true = unit test has an error false = unit test is ok

		self.change_to_testfile_dir = False #True if the test file needs to be executed from the same dir
		self.want_to_see_test_output = False #True to see the stdout from the testfile (for example check suites)
		self.want_to_see_test_error = False #True to see the stderr from the testfile (for example check suites)
		self.run_if_waf_does = 'check' #build was the old default

	def run(self):
		"Run the unit tests and gather results (note: no output here)"
		self.num_tests_ok = 0
		self.num_tests_failed = 0
		self.num_tests_err = 0
		self.total_num_tests = 0
		self.max_label_length = 0

		self.unit_tests = Utils.ordered_dict()
		self.unit_test_results = {}
		self.unit_test_erroneous = {}

		ld_library_path = []

		# If waf is not building, don't run anything
		if not Options.commands[self.run_if_waf_does]: return

		# Get the paths for the shared libraries, and obtain the unit tests to execute
		for obj in Build.bld.all_task_gen:
			try:
				link_task = obj.link_task
			except AttributeError:
				pass
			else:
				lib_path = link_task.outputs[0].parent.abspath(obj.env)
				if lib_path not in ld_library_path:
					ld_library_path.append(lib_path)

			unit_test = getattr(obj, 'unit_test', '')
			if unit_test and 'cprogram' in obj.features:
				try:
					output = obj.path
					filename = os.path.join(output.abspath(obj.env), obj.target)
					srcdir = output.abspath()
					label = os.path.join(output.bldpath(obj.env), obj.target)
					self.max_label_length = max(self.max_label_length, len(label))
					self.unit_tests[label] = (filename, srcdir)
				except KeyError:
					pass
		self.total_num_tests = len(self.unit_tests)
		# Now run the unit tests
		Utils.pprint('GREEN', 'Running the unit tests')
		count = 0
		result = 1

		for label in self.unit_tests.allkeys:
			file_and_src = self.unit_tests[label]
			filename = file_and_src[0]
			srcdir = file_and_src[1]
			count += 1
			line = Build.bld.progress_line(count, self.total_num_tests, Logs.colors.GREEN, Logs.colors.NORMAL)
			if Options.options.progress_bar and line:
				sys.stderr.write(line)
				sys.stderr.flush()
			try:
				# build the subprocess keyword arguments for this test
				kwargs = {}
				kwargs['env'] = os.environ.copy()
				if self.change_to_testfile_dir:
					kwargs['cwd'] = srcdir
				if not self.want_to_see_test_output:
					kwargs['stdout'] = Utils.pproc.PIPE # PIPE for ignoring output
				if not self.want_to_see_test_error:
					kwargs['stderr'] = Utils.pproc.PIPE # PIPE for ignoring output
				if ld_library_path:
					v = kwargs['env']
					def add_path(dct, path, var):
						dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])
					if sys.platform == 'win32':
						add_path(v, ld_library_path, 'PATH')
					elif sys.platform == 'darwin':
						add_path(v, ld_library_path, 'DYLD_LIBRARY_PATH')
						add_path(v, ld_library_path, 'LD_LIBRARY_PATH')
					else:
						add_path(v, ld_library_path, 'LD_LIBRARY_PATH')

				pp = Utils.pproc.Popen(filename, **kwargs)
				(out, err) = pp.communicate() # uh, and the output is ignored?? - fortunately this is going to disappear

				result = int(pp.returncode == self.returncode_ok)

				if result:
					self.num_tests_ok += 1
				else:
					self.num_tests_failed += 1

				self.unit_test_results[label] = result
				self.unit_test_erroneous[label] = 0
			except OSError:
				# the test program could not be started at all
				self.unit_test_erroneous[label] = 1
				self.num_tests_err += 1
			except KeyboardInterrupt:
				pass
		if Options.options.progress_bar: sys.stdout.write(Logs.colors.cursor_on)

	def print_results(self):
		"Pretty-prints a summary of all unit tests, along with some statistics"
		# If waf is not building, don't output anything
		if not Options.commands[self.run_if_waf_does]: return

		p = Utils.pprint
		# Early quit if no tests were performed
		if self.total_num_tests == 0:
			p('YELLOW', 'No unit tests present')
			return

		for label in self.unit_tests.allkeys:
			filename = self.unit_tests[label]
			err = 0
			result = 0

			try: err = self.unit_test_erroneous[label]
			except KeyError: pass

			try: result = self.unit_test_results[label]
			except KeyError: pass

			# pad with dots so the status column lines up
			n = self.max_label_length - len(label)
			if err: n += 4
			elif result: n += 7
			else: n += 3

			line = '%s %s' % (label, '.' * n)

			if err: p('RED', '%sERROR' % line)
			elif result: p('GREEN', '%sOK' % line)
			else: p('YELLOW', '%sFAILED' % line)

		percentage_ok = float(self.num_tests_ok) / float(self.total_num_tests) * 100.0
		percentage_failed = float(self.num_tests_failed) / float(self.total_num_tests) * 100.0
		percentage_erroneous = float(self.num_tests_err) / float(self.total_num_tests) * 100.0

		p('NORMAL', '''
Successful tests: %i (%.1f%%)
Failed tests: %i (%.1f%%)
Erroneous tests: %i (%.1f%%)
Total number of tests: %i
''' % (self.num_tests_ok, percentage_ok, self.num_tests_failed, percentage_failed,
		self.num_tests_err, percentage_erroneous, self.total_num_tests))
		p('GREEN', 'Unit tests finished')
############################################################################################
"""
New unit test system
The targets with feature 'test' are executed after they are built
bld(features='cprogram cc test', ...)
To display the results:
import UnitTest
bld.add_post_fun(UnitTest.summary)
"""
import threading
# serializes appends to bld.utest_results from parallel utest tasks
testlock = threading.Lock()

def set_options(opt):
	# --alltests: run every test even when its inputs are up to date
	opt.add_option('--alltests', action='store_true', default=True, help='Exec all unit tests', dest='all_tests')
@feature('test')
@after('apply_link', 'vars_target_cprogram')
def make_test(self):
	"""Attach a 'utest' task to a C program so it is executed after linking."""
	if 'cprogram' not in self.features:
		Logs.error('test cannot be executed %s' % self)
		return
	# test binaries are not installed
	self.default_install_path = None
	self.create_task('utest', self.link_task.outputs)
def exec_test(self):
	# Task method: run one compiled test program and record
	# (filename, returncode, stdout, stderr) on the build context.
	status = 0

	variant = self.env.variant()
	filename = self.inputs[0].abspath(self.env)

	self.ut_exec = getattr(self, 'ut_exec', [filename])
	if getattr(self.generator, 'ut_fun', None):
		# let the task generator customize the command line
		self.generator.ut_fun(self)

	try:
		fu = getattr(self.generator.bld, 'all_test_paths')
	except AttributeError:
		# first test: build the process environment once and cache it
		fu = os.environ.copy()
		self.generator.bld.all_test_paths = fu

		lst = []
		for obj in self.generator.bld.all_task_gen:
			link_task = getattr(obj, 'link_task', None)
			if link_task and link_task.env.variant() == variant:
				lst.append(link_task.outputs[0].parent.abspath(obj.env))

		def add_path(dct, path, var):
			dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])

		if sys.platform == 'win32':
			add_path(fu, lst, 'PATH')
		elif sys.platform == 'darwin':
			add_path(fu, lst, 'DYLD_LIBRARY_PATH')
			add_path(fu, lst, 'LD_LIBRARY_PATH')
		else:
			add_path(fu, lst, 'LD_LIBRARY_PATH')

	cwd = getattr(self.generator, 'ut_cwd', '') or self.inputs[0].parent.abspath(self.env)
	proc = Utils.pproc.Popen(self.ut_exec, cwd=cwd, env=fu, stderr=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE)
	(stdout, stderr) = proc.communicate()

	tup = (filename, proc.returncode, stdout, stderr)
	self.generator.utest_result = tup

	# utest tasks may run in parallel: protect the shared results list
	testlock.acquire()
	try:
		bld = self.generator.bld
		Logs.debug("ut: %r", tup)
		try:
			bld.utest_results.append(tup)
		except AttributeError:
			bld.utest_results = [tup]
	finally:
		testlock.release()
cls = Task.task_type_from_func('utest', func=exec_test, color='PINK', ext_in='.bin')

old = cls.runnable_status
def test_status(self):
	# force the test to run when --alltests is given, even if up to date
	ret = old(self)
	if ret == SKIP_ME and getattr(Options.options, 'all_tests', False):
		return RUN_ME
	return ret

cls.runnable_status = test_status
cls.quiet = 1
def summary(bld):
	"""Print a summary of the results stored in bld.utest_results;
	install with bld.add_post_fun(UnitTest.summary)."""
	results = getattr(bld, 'utest_results', [])
	if not results:
		return
	Utils.pprint('CYAN', 'execution summary')
	total = len(results)
	nfail = len([entry for entry in results if entry[1]])
	Utils.pprint('CYAN', ' tests that pass %d/%d' % (total-nfail, total))
	for (f, code, out, err) in results:
		if not code:
			Utils.pprint('CYAN', ' %s' % f)
	Utils.pprint('CYAN', ' tests that fail %d/%d' % (nfail, total))
	for (f, code, out, err) in results:
		if code:
			Utils.pprint('CYAN', ' %s' % f)
ntdb-1.0/buildtools/wafadmin/Tools/vala.py 0000664 0000000 0000000 00000024071 12241515307 0020650 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Ali Sabil, 2007
import os.path, shutil
import Task, Runner, Utils, Logs, Build, Node, Options
from TaskGen import extension, after, before
EXT_VALA = ['.vala', '.gs']
# Task that runs valac once per task generator, compiling all .vala/.gs
# sources to C before the cc/cxx tasks pick them up.
class valac_task(Task.Task):
vars = ("VALAC", "VALAC_VERSION", "VALAFLAGS")
before = ("cc", "cxx")
def run(self):
# Assemble the valac command line; option spelling varies by valac version.
env = self.env
inputs = [a.srcpath(env) for a in self.inputs]
valac = env['VALAC']
vala_flags = env.get_flat('VALAFLAGS')
top_src = self.generator.bld.srcnode.abspath()
top_bld = self.generator.bld.srcnode.abspath(env)
# --quiet only exists after 0.1.6
if env['VALAC_VERSION'] > (0, 1, 6):
cmd = [valac, '-C', '--quiet', vala_flags]
else:
cmd = [valac, '-C', vala_flags]
if self.threading:
cmd.append('--thread')
if self.profile:
cmd.append('--profile=%s' % self.profile)
if self.target_glib:
cmd.append('--target-glib=%s' % self.target_glib)
features = self.generator.features
# libraries additionally emit a .vapi (and possibly .gir/.h)
if 'cshlib' in features or 'cstaticlib' in features:
output_dir = self.outputs[0].bld_dir(env)
cmd.append('--library ' + self.target)
if env['VALAC_VERSION'] >= (0, 7, 0):
for x in self.outputs:
if x.name.endswith('.h'):
cmd.append('--header ' + x.bldpath(self.env))
cmd.append('--basedir ' + top_src)
cmd.append('-d ' + top_bld)
if env['VALAC_VERSION'] > (0, 7, 2) and hasattr(self, 'gir'):
cmd.append('--gir=%s.gir' % self.gir)
else:
output_dir = self.outputs[0].bld_dir(env)
cmd.append('-d %s' % output_dir)
for vapi_dir in self.vapi_dirs:
cmd.append('--vapidir=%s' % vapi_dir)
for package in self.packages:
cmd.append('--pkg %s' % package)
for package in self.packages_private:
cmd.append('--pkg %s' % package)
cmd.append(" ".join(inputs))
result = self.generator.bld.exec_command(" ".join(cmd))
if not 'cprogram' in features:
# generate the .deps file
if self.packages:
filename = os.path.join(self.generator.path.abspath(env), "%s.deps" % self.target)
deps = open(filename, 'w')
for package in self.packages:
deps.write(package + '\n')
deps.close()
# handle vala 0.1.6 who doesn't honor --directory for the generated .vapi
self._fix_output("../%s.vapi" % self.target)
# handle vala >= 0.1.7 who has a weird definition for --directory
self._fix_output("%s.vapi" % self.target)
# handle vala >= 0.2.0 who doesn't honor --directory for the generated .gidl
self._fix_output("%s.gidl" % self.target)
# handle vala >= 0.3.6 who doesn't honor --directory for the generated .gir
self._fix_output("%s.gir" % self.target)
if hasattr(self, 'gir'):
self._fix_output("%s.gir" % self.gir)
# old valac may scatter outputs across directories; move strays next to
# the first output node (issue #483)
first = None
for node in self.outputs:
if not first:
first = node
else:
if first.parent.id != node.parent.id:
# issue #483
if env['VALAC_VERSION'] < (0, 7, 0):
shutil.move(first.parent.abspath(self.env) + os.sep + node.name, node.abspath(self.env))
return result
def install(self):
# Install headers, .vapi/.deps and .gir files for library targets.
bld = self.generator.bld
features = self.generator.features
if self.attr("install_path") and ("cshlib" in features or "cstaticlib" in features):
headers_list = [o for o in self.outputs if o.suffix() == ".h"]
vapi_list = [o for o in self.outputs if (o.suffix() in (".vapi", ".deps"))]
gir_list = [o for o in self.outputs if o.suffix() == ".gir"]
for header in headers_list:
top_src = self.generator.bld.srcnode
package = self.env['PACKAGE']
# derive an API version ("0.x" or "x.0") from the project VERSION
try:
api_version = Utils.g_module.API_VERSION
except AttributeError:
version = Utils.g_module.VERSION.split(".")
if version[0] == "0":
api_version = "0." + version[1]
else:
api_version = version[0] + ".0"
install_path = '${INCLUDEDIR}/%s-%s/%s' % (package, api_version, header.relpath_gen(top_src))
bld.install_as(install_path, header, self.env)
bld.install_files('${DATAROOTDIR}/vala/vapi', vapi_list, self.env)
bld.install_files('${DATAROOTDIR}/gir-1.0', gir_list, self.env)
def _fix_output(self, output):
# Best-effort move of a misplaced valac output into the task's directory;
# failures are ignored because most versions produce only some of the names tried.
top_bld = self.generator.bld.srcnode.abspath(self.env)
try:
src = os.path.join(top_bld, output)
dst = self.generator.path.abspath (self.env)
shutil.move(src, dst)
except:
pass
@extension(EXT_VALA)
def vala_file(self, node):
# Hook for .vala/.gs files: create (or reuse) the single valac task of this
# task generator, wire up packages/vapi dirs/includes, and declare outputs.
valatask = getattr(self, "valatask", None)
# there is only one vala task and it compiles all vala files .. :-/
if not valatask:
valatask = self.create_task('valac')
self.valatask = valatask
self.includes = Utils.to_list(getattr(self, 'includes', []))
self.uselib = self.to_list(self.uselib)
valatask.packages = []
valatask.packages_private = Utils.to_list(getattr(self, 'packages_private', []))
valatask.vapi_dirs = []
valatask.target = self.target
valatask.threading = False
valatask.install_path = self.install_path
valatask.profile = getattr (self, 'profile', 'gobject')
valatask.target_glib = None #Deprecated
packages = Utils.to_list(getattr(self, 'packages', []))
vapi_dirs = Utils.to_list(getattr(self, 'vapi_dirs', []))
includes = []
# Walk uselib_local transitively: local library deps supply .vapi files,
# vapi dirs, include dirs, and task ordering constraints.
if hasattr(self, 'uselib_local'):
local_packages = Utils.to_list(self.uselib_local)
seen = []
while len(local_packages) > 0:
package = local_packages.pop()
if package in seen:
continue
seen.append(package)
# check if the package exists
package_obj = self.name_to_obj(package)
if not package_obj:
raise Utils.WafError("object '%s' was not found in uselib_local (required by '%s')" % (package, self.name))
package_name = package_obj.target
package_node = package_obj.path
package_dir = package_node.relpath_gen(self.path)
for task in package_obj.tasks:
for output in task.outputs:
if output.name == package_name + ".vapi":
valatask.set_run_after(task)
if package_name not in packages:
packages.append(package_name)
if package_dir not in vapi_dirs:
vapi_dirs.append(package_dir)
if package_dir not in includes:
includes.append(package_dir)
if hasattr(package_obj, 'uselib_local'):
lst = self.to_list(package_obj.uselib_local)
lst.reverse()
local_packages = [pkg for pkg in lst if pkg not in seen] + local_packages
valatask.packages = packages
# register vapi dirs in both source and build trees
for vapi_dir in vapi_dirs:
try:
valatask.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath())
valatask.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath(self.env))
except AttributeError:
Logs.warn("Unable to locate Vala API directory: '%s'" % vapi_dir)
self.includes.append(node.bld.srcnode.abspath())
self.includes.append(node.bld.srcnode.abspath(self.env))
for include in includes:
try:
self.includes.append(self.path.find_dir(include).abspath())
self.includes.append(self.path.find_dir(include).abspath(self.env))
except AttributeError:
Logs.warn("Unable to locate include directory: '%s'" % include)
# gobject profile pulls in GOBJECT (and GTHREAD when threading is requested)
if valatask.profile == 'gobject':
if hasattr(self, 'target_glib'):
Logs.warn ('target_glib on vala tasks is deprecated --vala-target-glib=MAJOR.MINOR from the vala tool options')
if getattr(Options.options, 'vala_target_glib', None):
valatask.target_glib = Options.options.vala_target_glib
if not 'GOBJECT' in self.uselib:
self.uselib.append('GOBJECT')
if hasattr(self, 'threading'):
if valatask.profile == 'gobject':
valatask.threading = self.threading
if not 'GTHREAD' in self.uselib:
self.uselib.append('GTHREAD')
else:
#Vala doesn't have threading support for dova nor posix
Logs.warn("Profile %s does not have threading support" % valatask.profile)
if hasattr(self, 'gir'):
valatask.gir = self.gir
# Declare the per-source and per-target outputs; exact set depends on the
# valac version and on whether this is a program or a library.
env = valatask.env
output_nodes = []
c_node = node.change_ext('.c')
output_nodes.append(c_node)
self.allnodes.append(c_node)
if env['VALAC_VERSION'] < (0, 7, 0):
output_nodes.append(node.change_ext('.h'))
else:
if not 'cprogram' in self.features:
output_nodes.append(self.path.find_or_declare('%s.h' % self.target))
if not 'cprogram' in self.features:
output_nodes.append(self.path.find_or_declare('%s.vapi' % self.target))
if env['VALAC_VERSION'] > (0, 7, 2):
if hasattr(self, 'gir'):
output_nodes.append(self.path.find_or_declare('%s.gir' % self.gir))
elif env['VALAC_VERSION'] > (0, 3, 5):
output_nodes.append(self.path.find_or_declare('%s.gir' % self.target))
elif env['VALAC_VERSION'] > (0, 1, 7):
output_nodes.append(self.path.find_or_declare('%s.gidl' % self.target))
if valatask.packages:
output_nodes.append(self.path.find_or_declare('%s.deps' % self.target))
valatask.inputs.append(node)
valatask.outputs.extend(output_nodes)
def detect(conf):
# Configure-time check: find valac, verify its version, and (when not already
# provided) detect the gobject/gthread pkg-config flags.
min_version = (0, 1, 6)
min_version_str = "%d.%d.%d" % min_version
valac = conf.find_program('valac', var='VALAC', mandatory=True)
if not conf.env["HAVE_GOBJECT"]:
pkg_args = {'package': 'gobject-2.0',
'uselib_store': 'GOBJECT',
'args': '--cflags --libs'}
if getattr(Options.options, 'vala_target_glib', None):
pkg_args['atleast_version'] = Options.options.vala_target_glib
conf.check_cfg(**pkg_args)
if not conf.env["HAVE_GTHREAD"]:
pkg_args = {'package': 'gthread-2.0',
'uselib_store': 'GTHREAD',
'args': '--cflags --libs'}
if getattr(Options.options, 'vala_target_glib', None):
pkg_args['atleast_version'] = Options.options.vala_target_glib
conf.check_cfg(**pkg_args)
# parse "Vala x.y.z" from `valac --version`; fall back to (0,0,0) on any failure
try:
output = Utils.cmd_output(valac + " --version", silent=True)
version = output.split(' ', 1)[-1].strip().split(".")[0:3]
version = [int(x) for x in version]
valac_version = tuple(version)
except Exception:
valac_version = (0, 0, 0)
conf.check_message('program version',
'valac >= ' + min_version_str,
valac_version >= min_version,
"%d.%d.%d" % valac_version)
conf.check_tool('gnu_dirs')
if valac_version < min_version:
# conf.fatal raises; the explicit return is a safety net
conf.fatal("valac version too old to be used with this tool")
return
conf.env['VALAC_VERSION'] = valac_version
conf.env['VALAFLAGS'] = ''
def set_options(opt):
    """Add the Vala option group with the --vala-target-glib flag."""
    group = opt.add_option_group('Vala Compiler Options')
    group.add_option('--vala-target-glib',
                     default=None,
                     dest='vala_target_glib',
                     metavar='MAJOR.MINOR',
                     help='Target version of glib for Vala GObject code generation')
ntdb-1.0/buildtools/wafadmin/Tools/winres.py 0000664 0000000 0000000 00000002406 12241515307 0021232 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Brant Young, 2007
"This hook is called when the class cpp/cc task generator encounters a '.rc' file: X{.rc -> [.res|.rc.o]}"
import os, sys, re
import TaskGen, Task
from Utils import quote_whitespace
from TaskGen import extension
EXT_WINRC = ['.rc']
winrc_str = '${WINRC} ${_CPPDEFFLAGS} ${_CCDEFFLAGS} ${WINRCFLAGS} ${_CPPINCFLAGS} ${_CCINCFLAGS} ${WINRC_TGT_F} ${TGT} ${WINRC_SRC_F} ${SRC}'
@extension(EXT_WINRC)
def rc_file(self, node):
    """Map a .rc source onto a winrc task producing .res (msvc /fo) or .rc.o (windres)."""
    out_ext = '.rc.o'
    if self.env['WINRC_TGT_F'] == '/fo':
        out_ext = '.res'
    task = self.create_task('winrc', node, node.change_ext(out_ext))
    self.compiled_tasks.append(task)
# declare the rule executed by winrc tasks
Task.simple_task_type('winrc', winrc_str, color='BLUE', before='cc cxx', shell=False)
def detect(conf):
# Configure the resource compiler: windres for gcc-like compilers,
# RC (with /fo output syntax) for msvc.
v = conf.env
winrc = v['WINRC']
v['WINRC_TGT_F'] = '-o'
v['WINRC_SRC_F'] = '-i'
# find rc.exe
if not winrc:
if v['CC_NAME'] in ['gcc', 'cc', 'g++', 'c++']:
winrc = conf.find_program('windres', var='WINRC', path_list = v['PATH'])
elif v['CC_NAME'] == 'msvc':
winrc = conf.find_program('RC', var='WINRC', path_list = v['PATH'])
# msvc RC uses /fo and takes the source without a flag
v['WINRC_TGT_F'] = '/fo'
v['WINRC_SRC_F'] = ''
if not winrc:
conf.fatal('winrc was not found!')
v['WINRCFLAGS'] = ''
ntdb-1.0/buildtools/wafadmin/Tools/xlc.py 0000664 0000000 0000000 00000003770 12241515307 0020516 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2008 (ita)
# Ralf Habacker, 2006 (rh)
# Yinon Ehrlich, 2009
# Michael Kuhn, 2009
import os, sys
import Configure, Options, Utils
import ccroot, ar
from Configure import conftest
@conftest
def find_xlc(conf):
    """Locate IBM XL C (preferring the reentrant xlc_r) and record it as CC."""
    compiler = conf.find_program(['xlc_r', 'xlc'], var='CC', mandatory=True)
    compiler = conf.cmd_to_list(compiler)
    conf.env.CC_NAME = 'xlc'
    conf.env.CC = compiler
@conftest
def find_cpp(conf):
    """Record a C preprocessor from conf.env or the OS environment; may remain None."""
    v = conf.env
    cpp = v['CPP']
    if not cpp:
        # falls back to None when the environment has no CPP either
        cpp = conf.environ.get('CPP')
    #if not cpp: cpp = v['CC']
    v['CPP'] = cpp
@conftest
def xlc_common_flags(conf):
# Populate the env with the flag templates the cc tool expects for xlc.
v = conf.env
# CPPFLAGS CCDEFINES _CCINCFLAGS _CCDEFFLAGS
v['CCFLAGS_DEBUG'] = ['-g']
v['CCFLAGS_RELEASE'] = ['-O2']
v['CC_SRC_F'] = ''
v['CC_TGT_F'] = ['-c', '-o', ''] # shell hack for -MD
v['CPPPATH_ST'] = '-I%s' # template for adding include paths
# linker
if not v['LINK_CC']: v['LINK_CC'] = v['CC']
v['CCLNK_SRC_F'] = ''
v['CCLNK_TGT_F'] = ['-o', ''] # shell hack for -MD
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STATICLIB_ST'] = '-l%s'
v['STATICLIBPATH_ST'] = '-L%s'
v['RPATH_ST'] = '-Wl,-rpath,%s'
v['CCDEFINES_ST'] = '-D%s'
v['SONAME_ST'] = ''
v['SHLIB_MARKER'] = ''
v['STATICLIB_MARKER'] = ''
v['FULLSTATIC_MARKER'] = '-static'
# program
v['program_LINKFLAGS'] = ['-Wl,-brtl']
v['program_PATTERN'] = '%s'
# shared library
v['shlib_CCFLAGS'] = ['-fPIC', '-DPIC'] # avoid using -DPIC, -fPIC aleady defines the __PIC__ macro
v['shlib_LINKFLAGS'] = ['-G', '-Wl,-brtl,-bexpfull']
v['shlib_PATTERN'] = 'lib%s.so'
# static lib
v['staticlib_LINKFLAGS'] = ''
v['staticlib_PATTERN'] = 'lib%s.a'
def detect(conf):
    """Run the full xlc configuration sequence."""
    for step in (conf.find_xlc, conf.find_cpp, conf.find_ar,
                 conf.xlc_common_flags, conf.cc_load_tools,
                 conf.cc_add_flags, conf.link_add_flags):
        step()
ntdb-1.0/buildtools/wafadmin/Tools/xlcxx.py 0000664 0000000 0000000 00000004023 12241515307 0021066 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
# Ralf Habacker, 2006 (rh)
# Yinon Ehrlich, 2009
# Michael Kuhn, 2009
import os, sys
import Configure, Options, Utils
import ccroot, ar
from Configure import conftest
@conftest
def find_xlcxx(conf):
    """Locate IBM XL C++ (preferring the reentrant xlc++_r) and record it as CXX."""
    compiler = conf.find_program(['xlc++_r', 'xlc++'], var='CXX', mandatory=True)
    compiler = conf.cmd_to_list(compiler)
    conf.env.CXX_NAME = 'xlc++'
    conf.env.CXX = compiler
@conftest
def find_cpp(conf):
    """Record a C preprocessor from conf.env or the OS environment; may remain None."""
    v = conf.env
    cpp = v['CPP']
    if not cpp:
        # falls back to None when the environment has no CPP either
        cpp = conf.environ.get('CPP')
    #if not cpp: cpp = v['CXX']
    v['CPP'] = cpp
@conftest
def xlcxx_common_flags(conf):
# Populate the env with the flag templates the cxx tool expects for xlc++.
v = conf.env
# CPPFLAGS CXXDEFINES _CXXINCFLAGS _CXXDEFFLAGS
v['CXXFLAGS_DEBUG'] = ['-g']
v['CXXFLAGS_RELEASE'] = ['-O2']
v['CXX_SRC_F'] = ''
v['CXX_TGT_F'] = ['-c', '-o', ''] # shell hack for -MD
v['CPPPATH_ST'] = '-I%s' # template for adding include paths
# linker
if not v['LINK_CXX']: v['LINK_CXX'] = v['CXX']
v['CXXLNK_SRC_F'] = ''
v['CXXLNK_TGT_F'] = ['-o', ''] # shell hack for -MD
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STATICLIB_ST'] = '-l%s'
v['STATICLIBPATH_ST'] = '-L%s'
v['RPATH_ST'] = '-Wl,-rpath,%s'
v['CXXDEFINES_ST'] = '-D%s'
v['SONAME_ST'] = ''
v['SHLIB_MARKER'] = ''
v['STATICLIB_MARKER'] = ''
v['FULLSTATIC_MARKER'] = '-static'
# program
v['program_LINKFLAGS'] = ['-Wl,-brtl']
v['program_PATTERN'] = '%s'
# shared library
v['shlib_CXXFLAGS'] = ['-fPIC', '-DPIC'] # avoid using -DPIC, -fPIC aleady defines the __PIC__ macro
v['shlib_LINKFLAGS'] = ['-G', '-Wl,-brtl,-bexpfull']
v['shlib_PATTERN'] = 'lib%s.so'
# static lib
v['staticlib_LINKFLAGS'] = ''
v['staticlib_PATTERN'] = 'lib%s.a'
def detect(conf):
    """Run the full xlc++ configuration sequence."""
    for step in (conf.find_xlcxx, conf.find_cpp, conf.find_ar,
                 conf.xlcxx_common_flags, conf.cxx_load_tools,
                 conf.cxx_add_flags, conf.link_add_flags):
        step()
ntdb-1.0/buildtools/wafadmin/Utils.py 0000664 0000000 0000000 00000044265 12241515307 0017734 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
"""
Utilities, the stable ones are the following:
* h_file: compute a unique value for a file (hash), it uses
the module fnv if it is installed (see waf/utils/fnv & http://code.google.com/p/waf/wiki/FAQ)
else, md5 (see the python docs)
For large projects (projects with more than 15000 files) or slow hard disks and filesystems (HFS)
it is possible to use a hashing based on the path and the size (may give broken cache results)
The method h_file MUST raise an OSError if the file is a folder
import stat
def h_file(filename):
st = os.lstat(filename)
if stat.S_ISDIR(st[stat.ST_MODE]): raise IOError('not a file')
m = Utils.md5()
m.update(str(st.st_mtime))
m.update(str(st.st_size))
m.update(filename)
return m.digest()
To replace the function in your project, use something like this:
import Utils
Utils.h_file = h_file
* h_list
* h_fun
* get_term_cols
* ordered_dict
"""
import os, sys, imp, string, errno, traceback, inspect, re, shutil, datetime, gc
# In python 3.0 we can get rid of all this
try: from UserDict import UserDict
except ImportError: from collections import UserDict
if sys.hexversion >= 0x2060000 or os.name == 'java':
import subprocess as pproc
else:
import pproc
import Logs
from Constants import *
# Compatibility shims for very old Python versions (deque: 2.4+, defaultdict: 2.5+).
try:
from collections import deque
except ImportError:
class deque(list):
def popleft(self):
return self.pop(0)
is_win32 = sys.platform == 'win32'
try:
# defaultdict in python 2.5
from collections import defaultdict as DefaultDict
except ImportError:
class DefaultDict(dict):
# minimal re-implementation: missing keys are created via default_factory
def __init__(self, default_factory):
super(DefaultDict, self).__init__()
self.default_factory = default_factory
def __getitem__(self, key):
try:
return super(DefaultDict, self).__getitem__(key)
except KeyError:
value = self.default_factory()
self[key] = value
return value
class WafError(Exception):
    """Base error type for waf; captures the stack at construction for later reporting."""
    def __init__(self, *args):
        self.args = args
        try:
            self.stack = traceback.extract_stack()
        except:
            # stack capture is best-effort only
            pass
        Exception.__init__(self, *args)
    def __str__(self):
        # single truthy argument prints bare; otherwise print the whole tuple
        if len(self.args) == 1 and self.args[0]:
            return str(self.args[0])
        return str(self.args)
class WscriptError(WafError):
    """Error raised while processing a wscript; tries to locate the offending file/line."""
    def __init__(self, message, wscript_file=None):
        if wscript_file:
            self.wscript_file = wscript_file
            self.wscript_line = None
        else:
            try:
                (self.wscript_file, self.wscript_line) = self.locate_error()
            except:
                (self.wscript_file, self.wscript_line) = (None, None)
        prefix = ''
        if self.wscript_file:
            prefix = "%s:" % self.wscript_file
            if self.wscript_line:
                prefix += "%s:" % self.wscript_line
        WafError.__init__(self, "%s error: %s" % (prefix, message))
    def locate_error(self):
        """Scan the stack, innermost frame first, for a wscript file; (None, None) if absent."""
        for frame in reversed(traceback.extract_stack()):
            file_name = os.path.basename(frame[0])
            if file_name in (WSCRIPT_FILE, WSCRIPT_BUILD_FILE):
                return (frame[0], frame[1])
        return (None, None)
# progress-bar format string; win32 needs the extra cursor-up escape
indicator = is_win32 and '\x1b[A\x1b[K%s%s%s\r' or '\x1b[K%s%s%s\r'
# Pick a file-hashing implementation: the optional 'fnv' extension first,
# then hashlib's md5, then the legacy md5 module.
try:
from fnv import new as md5
import Constants
Constants.SIG_NIL = 'signofnv'
def h_file(filename):
m = md5()
try:
m.hfile(filename)
x = m.digest()
if x is None: raise OSError("not a file")
return x
except SystemError:
raise OSError("not a file" + filename)
except ImportError:
try:
try:
from hashlib import md5
except ImportError:
from md5 import md5
def h_file(filename):
# stream the file in 100000-byte chunks; 'filename' is reused as the buffer
f = open(filename, 'rb')
m = md5()
while (filename):
filename = f.read(100000)
m.update(filename)
f.close()
return m.digest()
except ImportError:
# portability fixes may be added elsewhere (although, md5 should be everywhere by now)
md5 = None
class ordered_dict(UserDict):
    """UserDict variant that records first-insertion order of keys in 'allkeys'."""
    def __init__(self, dict=None):
        self.allkeys = []
        UserDict.__init__(self, dict)
    def __setitem__(self, key, item):
        # remember the key the first time it appears
        if key not in self.allkeys:
            self.allkeys.append(key)
        UserDict.__setitem__(self, key, item)
    def __delitem__(self, key):
        self.allkeys.remove(key)
        UserDict.__delitem__(self, key)
def exec_command(s, **kw):
    """Run a command; a 'log' kwarg is mapped onto stdout+stderr. Returns the exit code, -1 on OSError."""
    if 'log' in kw:
        kw['stdout'] = kw['stderr'] = kw['log']
        del(kw['log'])
    kw['shell'] = isinstance(s, str)
    try:
        return pproc.Popen(s, **kw).wait()
    except OSError:
        return -1
if is_win32:
    def exec_command(s, **kw):
        """win32 override: captures output through Logs and hides the console for long command lines."""
        if 'log' in kw:
            kw['stdout'] = kw['stderr'] = kw['log']
            del(kw['log'])
        kw['shell'] = isinstance(s, str)
        if len(s) > 2000:
            # very long command lines: suppress the spawned console window
            info = pproc.STARTUPINFO()
            info.dwFlags |= pproc.STARTF_USESHOWWINDOW
            kw['startupinfo'] = info
        try:
            if 'stdout' not in kw:
                kw['stdout'] = pproc.PIPE
                kw['stderr'] = pproc.PIPE
                kw['universal_newlines'] = True
                process = pproc.Popen(s, **kw)
                (out, err) = process.communicate()
                Logs.info(out)
                if err:
                    Logs.error(err)
                return process.returncode
            else:
                process = pproc.Popen(s, **kw)
                return process.wait()
        except OSError:
            return -1
listdir = os.listdir
if is_win32:
    def listdir_win32(s):
        """os.listdir that copes with bare drive names ('x:') and raises ENOENT for non-directories."""
        if re.match('^[A-Za-z]:$', s):
            # os.path.isdir fails if s contains only the drive name... (x:)
            s += os.sep
        if not os.path.isdir(s):
            missing = OSError()
            missing.errno = errno.ENOENT
            raise missing
        return os.listdir(s)
    listdir = listdir_win32
def waf_version(mini = 0x010000, maxi = 0x100000):
    "Halts if the waf version is wrong"
    ver = HEXVERSION
    def as_hexver(bound):
        # accept either an int or a dotted string like '1.5.3'
        try:
            return bound + 0
        except TypeError:
            return int(bound.replace('.', '0'), 16)
    if as_hexver(mini) > ver:
        Logs.error("waf version should be at least %s (%s found)" % (mini, ver))
        sys.exit(1)
    if as_hexver(maxi) < ver:
        Logs.error("waf version should be at most %s (%s found)" % (maxi, ver))
        sys.exit(1)
def python_24_guard():
    """Refuse interpreters older than 2.4 or from the 3.x line."""
    hv = sys.hexversion
    if hv < 0x20400f0 or hv >= 0x3000000:
        raise ImportError("Waf requires Python >= 2.3 but the raw source requires Python 2.4, 2.5 or 2.6")
def ex_stack():
    """Format the current exception; full traceback when Logs.verbose > 1."""
    exc_type, exc_value, tb = sys.exc_info()
    if Logs.verbose > 1:
        return ''.join(traceback.format_exception(exc_type, exc_value, tb))
    return str(exc_value)
def to_list(sth):
    """Split a whitespace-separated string into a list; pass other values through."""
    if isinstance(sth, str):
        return sth.split()
    return sth
g_loaded_modules = {}
"index modules by absolute path"
g_module=None
"the main module is special"
def load_module(file_path, name=WSCRIPT_FILE):
"this function requires an absolute path"
# cached?
try:
return g_loaded_modules[file_path]
except KeyError:
pass
module = imp.new_module(name)
try:
code = readf(file_path, m='rU')
except (IOError, OSError):
raise WscriptError('Could not read the file %r' % file_path)
module.waf_hash_val = code
# make sibling imports work while executing the script
dt = os.path.dirname(file_path)
sys.path.insert(0, dt)
try:
exec(compile(code, file_path, 'exec'), module.__dict__)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
raise WscriptError("".join(traceback.format_exception(exc_type, exc_value, tb)), file_path)
# NOTE(review): sys.path.remove is skipped when the exec raises - confirm intentional
sys.path.remove(dt)
g_loaded_modules[file_path] = module
return module
def set_main_module(file_path):
"Load custom options, if defined"
global g_module
g_module = load_module(file_path, 'wscript_main')
g_module.root_path = file_path
# provide defaults for the project identity attributes
try:
g_module.APPNAME
except:
g_module.APPNAME = 'noname'
try:
g_module.VERSION
except:
g_module.VERSION = '1.0'
# note: to register the module globally, use the following:
# sys.modules['wscript_main'] = g_module
def to_hashtable(s):
    """Parse an env-file style string ("KEY=VALUE" per line) into a dict.

    Empty lines are skipped.  The split is limited to the first '=' so that
    values containing '=' themselves (e.g. "CFLAGS=-DFOO=1") are preserved
    intact; the previous unlimited split truncated such values.
    A non-empty line with no '=' raises IndexError, as before.
    """
    tbl = {}
    for line in s.split('\n'):
        if not line:
            continue
        mems = line.split('=', 1)
        tbl[mems[0]] = mems[1]
    return tbl
def get_term_cols():
"console width"
# default; replaced below by an ioctl-based probe when available
return 80
try:
import struct, fcntl, termios
except ImportError:
pass
else:
if Logs.got_tty:
def myfun():
# query the terminal size via TIOCGWINSZ on stderr
dummy_lines, cols = struct.unpack("HHHH", \
fcntl.ioctl(sys.stderr.fileno(),termios.TIOCGWINSZ , \
struct.pack("HHHH", 0, 0, 0, 0)))[:2]
return cols
# we actually try the function once to see if it is suitable
try:
myfun()
except:
pass
else:
get_term_cols = myfun
rot_idx = 0
rot_chr = ['\\', '|', '/', '-']
"the rotation character in the progress bar"
def split_path(path):
    """Split a POSIX path on '/'."""
    return path.split('/')
def split_path_cygwin(path):
    """Split a cygwin path; UNC paths keep their '/server' head element."""
    if path.startswith('//'):
        parts = path.split('/')[2:]
        parts[0] = '/' + parts[0]
        return parts
    return path.split('/')
re_sp = re.compile('[/\\\\]')
def split_path_win32(path):
    """Split a win32 path on either separator; UNC paths keep their '\\server' head element."""
    if path.startswith('\\\\'):
        parts = re_sp.split(path)[2:]
        parts[0] = '\\' + parts[0]
        return parts
    return re_sp.split(path)
# pick the platform-appropriate splitter once at import time
if sys.platform == 'cygwin':
split_path = split_path_cygwin
elif is_win32:
split_path = split_path_win32
def copy_attrs(orig, dest, names, only_if_set=False):
    """Copy attributes listed in 'names' (list or space-separated string) from orig to dest.

    Missing attributes default to (); with only_if_set, falsy values are not copied.
    """
    if isinstance(names, str):
        name_list = names.split()
    else:
        name_list = names
    for attr in name_list:
        value = getattr(orig, attr, ())
        if value or not only_if_set:
            setattr(dest, attr, value)
def def_attrs(cls, **kw):
    '''
    set attributes for class.
    @param cls [any class]: the class to update the given attributes in.
    @param kw [dictionary]: dictionary of attributes names and values.
    if the given class hasn't one (or more) of these attributes, add the attribute with its value to the class.
    '''
    # kw.items() behaves identically to the former kw.iteritems() here and
    # keeps this module importable under Python 3 as well.
    for k, v in kw.items():
        if not hasattr(cls, k):
            setattr(cls, k, v)
def quote_define_name(path):
    """Turn an arbitrary string into an uppercase C-identifier-safe define name."""
    sanitized = re.compile("[^a-zA-Z0-9]").sub("_", path)
    return sanitized.upper()
def quote_whitespace(path):
    """Wrap a path containing interior whitespace in double quotes."""
    if path.strip().find(' ') > 0:
        path = '"%s"' % path
    return path.replace('""', '"')
def trimquotes(s):
    """Strip one pair of surrounding single quotes (after trimming trailing whitespace)."""
    if not s:
        return ''
    s = s.rstrip()
    if s[0] == "'" and s[-1] == "'":
        return s[1:-1]
    return s
def h_list(lst):
    """Hash a list by hashing its string representation."""
    hasher = md5()
    hasher.update(str(lst))
    return hasher.digest()
def h_fun(fun):
    """Return a stable string for hashing a function: its source code, cached on the
    function object as 'code', or the sentinel 'nocode' when the source is unavailable."""
    try:
        return fun.code
    except AttributeError:
        pass
    try:
        src = inspect.getsource(fun)
    except IOError:
        src = "nocode"
    try:
        # cache for subsequent calls; some callables reject attribute assignment
        fun.code = src
    except AttributeError:
        pass
    return src
def pprint(col, str, label='', sep='\n'):
    "print messages in color"
    colored = "%s%s%s" % (Logs.colors(col), str, Logs.colors.NORMAL)
    sys.stderr.write("%s %s%s" % (colored, label, sep))
def check_dir(dir):
"""Create the folder (and missing parents) if it does not exist; raise WafError on failure."""
# lstat doubles as a cheap existence probe
try:
os.lstat(dir)
except OSError:
try:
os.makedirs(dir)
# NOTE: 'except OSError, e' is Python-2-only syntax; kept as-is for 2.4 support
except OSError, e:
raise WafError("Cannot create folder '%s' (original error: %s)" % (dir, e))
def cmd_output(cmd, **kw):
# Run a command and return its stdout; raises ValueError on spawn failure or,
# unless silent=True, on a non-zero exit status.
silent = False
if 'silent' in kw:
silent = kw['silent']
del(kw['silent'])
# 'e' is a shorthand kwarg for the subprocess environment
if 'e' in kw:
tmp = kw['e']
del(kw['e'])
kw['env'] = tmp
kw['shell'] = isinstance(cmd, str)
kw['stdout'] = pproc.PIPE
if silent:
kw['stderr'] = pproc.PIPE
try:
p = pproc.Popen(cmd, **kw)
output = p.communicate()[0]
# NOTE: 'except OSError, e' is Python-2-only syntax; kept as-is for 2.4 support
except OSError, e:
raise ValueError(str(e))
if p.returncode:
if not silent:
msg = "command execution failed: %s -> %r" % (cmd, str(output))
raise ValueError(msg)
# silent mode swallows the failure and returns empty output
output = ''
return output
reg_subst = re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}")
def subst_vars(expr, params):
    """Expand ${NAME} references in expr from params; '$$' -> '$' and a doubled
    backslash collapses to a single one."""
    def expand(match):
        if match.group(1):
            return '\\'
        if match.group(2):
            return '$'
        name = match.group(3)
        try:
            # waf Environment objects flatten list values
            return params.get_flat(name)
        except AttributeError:
            return params[name]
    return reg_subst.sub(expand, expr)
def unversioned_sys_platform_to_binary_format(unversioned_sys_platform):
    "infers the binary format from the unversioned_sys_platform name."
    if unversioned_sys_platform in ('linux', 'freebsd', 'netbsd', 'openbsd', 'sunos', 'gnu'):
        return 'elf'
    if unversioned_sys_platform == 'darwin':
        return 'mac-o'
    if unversioned_sys_platform in ('win32', 'cygwin', 'uwin', 'msys'):
        return 'pe'
    # all other systems are assumed to be elf, which is not strictly true;
    # ccroot and friends would need to handle an 'unknown' value gracefully
    return 'elf'
def unversioned_sys_platform():
    """Return sys.platform with its trailing version digits removed (e.g. freebsd7 -> freebsd).

    Names that legitimately end in digits (win32, os2) are returned unchanged.
    Under Jython the real OS name is resolved via java.lang.System.
    """
    s = sys.platform
    if s == 'java':
        # The real OS is hidden under the JVM.
        from java.lang import System
        s = System.getProperty('os.name')
        # see http://lopica.sourceforge.net/os.html for a list of possible values
        if s == 'Mac OS X':
            return 'darwin'
        elif s.startswith('Windows '):
            return 'win32'
        elif s == 'OS/2':
            return 'os2'
        elif s == 'HP-UX':
            return 'hpux'
        elif s in ('SunOS', 'Solaris'):
            return 'sunos'
        else:
            s = s.lower()
    if s == 'win32' or s.endswith('os2') and s != 'sunos2':
        return s
    return re.split('\d+$', s)[0]
#@deprecated('use unversioned_sys_platform instead')
def detect_platform():
    """Legacy helper: return a known POSIX platform name contained in sys.platform,
    else os.name for generic POSIX-ish systems, else raw sys.platform."""
    s = sys.platform
    # known POSIX
    for known in 'cygwin linux irix sunos hpux aix darwin gnu'.split():
        # sys.platform may be linux2
        if s.find(known) >= 0:
            return known
    # unknown POSIX
    if os.name in 'posix java os2'.split():
        return os.name
    return s
def load_tool(tool, tooldir=None):
    '''
    load_tool: import a Python module, optionally using several directories.
    @param tool [string]: name of tool to import.
    @param tooldir [list]: directories to look for the tool.
    @return: the loaded module.
    Warning: this function is not thread-safe: plays with sys.path,
    so must run in sequence.
    '''
    dirs = tooldir or []
    if dirs:
        assert isinstance(dirs, list)
        sys.path = dirs + sys.path
    try:
        return __import__(tool)
    finally:
        # always restore sys.path, even when the import raises
        for entry in dirs:
            sys.path.remove(entry)
def readf(fname, m='r'):
    """Return the full contents of a file, closing it even on read errors."""
    handle = open(fname, m)
    try:
        return handle.read()
    finally:
        handle.close()
def nada(*k, **kw):
    """A function that does nothing"""
    pass
def diff_path(top, subdir):
    """difference between two absolute paths"""
    top_parts = os.path.normpath(top).replace('\\', '/').split('/')
    sub_parts = os.path.normpath(subdir).replace('\\', '/').split('/')
    if len(top_parts) == len(sub_parts):
        return ''
    tail = sub_parts[len(top_parts) - len(sub_parts):]
    return os.path.join(*tail)
class Context(object):
"""A base class for commands to be executed from Waf scripts"""
# 'curdir' is lazily initialized to os.getcwd() on first access
def set_curdir(self, dir):
self.curdir_ = dir
def get_curdir(self):
try:
return self.curdir_
except AttributeError:
self.curdir_ = os.getcwd()
return self.get_curdir()
curdir = property(get_curdir, set_curdir)
def recurse(self, dirs, name=''):
"""The function for calling scripts from folders, it tries to call wscript + function_name
and if that file does not exist, it will call the method 'function_name' from a file named wscript
the dirs can be a list of folders or a string containing space-separated folder paths
"""
# default to the name of the calling function (e.g. 'build', 'configure')
if not name:
name = inspect.stack()[1][3]
if isinstance(dirs, str):
dirs = to_list(dirs)
for x in dirs:
if os.path.isabs(x):
nexdir = x
else:
nexdir = os.path.join(self.curdir, x)
base = os.path.join(nexdir, WSCRIPT_FILE)
file_path = base + '_' + name
# first preference: a dedicated 'wscript_<name>' file
try:
txt = readf(file_path, m='rU')
except (OSError, IOError):
# fallback: function '<name>' inside the plain wscript module
try:
module = load_module(base)
except OSError:
raise WscriptError('No such script %s' % base)
try:
f = module.__dict__[name]
except KeyError:
raise WscriptError('No function %s defined in %s' % (name, base))
if getattr(self.__class__, 'pre_recurse', None):
self.pre_recurse(f, base, nexdir)
old = self.curdir
self.curdir = nexdir
try:
f(self)
finally:
self.curdir = old
if getattr(self.__class__, 'post_recurse', None):
self.post_recurse(module, base, nexdir)
else:
# execute the dedicated file with 'ctx' bound to this context
dc = {'ctx': self}
if getattr(self.__class__, 'pre_recurse', None):
dc = self.pre_recurse(txt, file_path, nexdir)
old = self.curdir
self.curdir = nexdir
try:
try:
exec(compile(txt, file_path, 'exec'), dc)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
raise WscriptError("".join(traceback.format_exception(exc_type, exc_value, tb)), base)
finally:
self.curdir = old
if getattr(self.__class__, 'post_recurse', None):
self.post_recurse(txt, file_path, nexdir)
if is_win32:
    # Patch shutil.copy2 on win32 so that the stat copy is applied explicitly.
    old = shutil.copy2
    def copy2(src, dst):
        old(src, dst)
        # was copystat(src, src): copying stats onto the source is a no-op and
        # defeated the purpose of this wrapper; the destination must receive them
        shutil.copystat(src, dst)
    setattr(shutil, 'copy2', copy2)
def zip_folder(dir, zip_file_name, prefix):
    """
    prefix represents the app to add in the archive
    """
    import zipfile
    archive = zipfile.ZipFile(zip_file_name, 'w', compression=zipfile.ZIP_DEFLATED)
    base = os.path.abspath(dir)
    if prefix and prefix[-1] != os.sep:
        prefix += os.sep
    n = len(base)
    for root, dirs, files in os.walk(base):
        for f in files:
            # archive name = prefix + path relative to 'base'
            archive.write(root + os.sep + f, prefix + root[n:] + os.sep + f, zipfile.ZIP_DEFLATED)
    archive.close()
def get_elapsed_time(start):
    "Format a time delta (datetime.timedelta) using the format DdHhMmS.MSs"
    delta = datetime.datetime.now() - start
    # cast to int necessary for python 3.0
    days = int(delta.days)
    hours = int(delta.seconds / 3600)
    minutes = int((delta.seconds - hours * 3600) / 60)
    seconds = delta.seconds - hours * 3600 - minutes * 60 + delta.microseconds / 1000000.0
    out = ''
    if days:
        out += '%dd' % days
    if days or hours:
        out += '%dh' % hours
    if days or hours or minutes:
        out += '%dm' % minutes
    return '%s%.3fs' % (out, seconds)
if os.name == 'java':
    # For Jython (they should really fix the inconsistency):
    # gc.disable() may raise NotImplementedError while gc.enable works
    try:
        gc.disable()
        gc.enable()
    except NotImplementedError:
        gc.disable = gc.enable
def run_once(fun):
    """
    decorator, make a function cache its results, use like this:
    @run_once
    def foo(k):
        return 345*2343
    """
    memo = {}
    def wrap(k):
        if k in memo:
            return memo[k]
        result = fun(k)
        memo[k] = result
        return result
    wrap.__cache__ = memo
    return wrap
ntdb-1.0/buildtools/wafadmin/__init__.py 0000664 0000000 0000000 00000000102 12241515307 0020351 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
ntdb-1.0/buildtools/wafadmin/ansiterm.py 0000664 0000000 0000000 00000017524 12241515307 0020454 0 ustar 00root root 0000000 0000000 import sys, os
try:
	# Only attempt the win32 console setup when both std streams are real
	# ttys; any failure below (non-windows, redirected output, tiny
	# console) falls through to the no-op 'pass' and leaves stdio alone.
	if (not sys.stderr.isatty()) or (not sys.stdout.isatty()):
		raise ValueError('not a tty')
	from ctypes import *
	# ctypes mirrors of the win32 console structures (wincon.h)
	class COORD(Structure):
		_fields_ = [("X", c_short), ("Y", c_short)]
	class SMALL_RECT(Structure):
		_fields_ = [("Left", c_short), ("Top", c_short), ("Right", c_short), ("Bottom", c_short)]
	class CONSOLE_SCREEN_BUFFER_INFO(Structure):
		_fields_ = [("Size", COORD), ("CursorPosition", COORD), ("Attributes", c_short), ("Window", SMALL_RECT), ("MaximumWindowSize", COORD)]
	class CONSOLE_CURSOR_INFO(Structure):
		_fields_ = [('dwSize',c_ulong), ('bVisible', c_int)]
	sbinfo = CONSOLE_SCREEN_BUFFER_INFO()
	csinfo = CONSOLE_CURSOR_INFO()
	# -11 == STD_OUTPUT_HANDLE; off-windows 'windll' does not exist and
	# the AttributeError is swallowed below
	hconsole = windll.kernel32.GetStdHandle(-11)
	windll.kernel32.GetConsoleScreenBufferInfo(hconsole, byref(sbinfo))
	if sbinfo.Size.X < 10 or sbinfo.Size.Y < 10: raise Exception('small console')
	windll.kernel32.GetConsoleCursorInfo(hconsole, byref(csinfo))
except Exception:
	pass
else:
import re, threading
to_int = lambda number, default: number and int(number) or default
wlock = threading.Lock()
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
class AnsiTerm(object):
def __init__(self):
self.hconsole = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
self.cursor_history = []
self.orig_sbinfo = CONSOLE_SCREEN_BUFFER_INFO()
self.orig_csinfo = CONSOLE_CURSOR_INFO()
windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(self.orig_sbinfo))
windll.kernel32.GetConsoleCursorInfo(hconsole, byref(self.orig_csinfo))
def screen_buffer_info(self):
sbinfo = CONSOLE_SCREEN_BUFFER_INFO()
windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(sbinfo))
return sbinfo
def clear_line(self, param):
mode = param and int(param) or 0
sbinfo = self.screen_buffer_info()
if mode == 1: # Clear from begining of line to cursor position
line_start = COORD(0, sbinfo.CursorPosition.Y)
line_length = sbinfo.Size.X
elif mode == 2: # Clear entire line
line_start = COORD(sbinfo.CursorPosition.X, sbinfo.CursorPosition.Y)
line_length = sbinfo.Size.X - sbinfo.CursorPosition.X
else: # Clear from cursor position to end of line
line_start = sbinfo.CursorPosition
line_length = sbinfo.Size.X - sbinfo.CursorPosition.X
chars_written = c_int()
windll.kernel32.FillConsoleOutputCharacterA(self.hconsole, c_char(' '), line_length, line_start, byref(chars_written))
windll.kernel32.FillConsoleOutputAttribute(self.hconsole, sbinfo.Attributes, line_length, line_start, byref(chars_written))
def clear_screen(self, param):
mode = to_int(param, 0)
sbinfo = self.screen_buffer_info()
if mode == 1: # Clear from begining of screen to cursor position
clear_start = COORD(0, 0)
clear_length = sbinfo.CursorPosition.X * sbinfo.CursorPosition.Y
elif mode == 2: # Clear entire screen and return cursor to home
clear_start = COORD(0, 0)
clear_length = sbinfo.Size.X * sbinfo.Size.Y
windll.kernel32.SetConsoleCursorPosition(self.hconsole, clear_start)
else: # Clear from cursor position to end of screen
clear_start = sbinfo.CursorPosition
clear_length = ((sbinfo.Size.X - sbinfo.CursorPosition.X) + sbinfo.Size.X * (sbinfo.Size.Y - sbinfo.CursorPosition.Y))
chars_written = c_int()
windll.kernel32.FillConsoleOutputCharacterA(self.hconsole, c_char(' '), clear_length, clear_start, byref(chars_written))
windll.kernel32.FillConsoleOutputAttribute(self.hconsole, sbinfo.Attributes, clear_length, clear_start, byref(chars_written))
def push_cursor(self, param):
sbinfo = self.screen_buffer_info()
self.cursor_history.push(sbinfo.CursorPosition)
def pop_cursor(self, param):
if self.cursor_history:
old_pos = self.cursor_history.pop()
windll.kernel32.SetConsoleCursorPosition(self.hconsole, old_pos)
def set_cursor(self, param):
x, sep, y = param.partition(';')
x = to_int(x, 1) - 1
y = to_int(y, 1) - 1
sbinfo = self.screen_buffer_info()
new_pos = COORD(
min(max(0, x), sbinfo.Size.X),
min(max(0, y), sbinfo.Size.Y)
)
windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos)
def set_column(self, param):
x = to_int(param, 1) - 1
sbinfo = self.screen_buffer_info()
new_pos = COORD(
min(max(0, x), sbinfo.Size.X),
sbinfo.CursorPosition.Y
)
windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos)
def move_cursor(self, x_offset=0, y_offset=0):
sbinfo = self.screen_buffer_info()
new_pos = COORD(
min(max(0, sbinfo.CursorPosition.X + x_offset), sbinfo.Size.X),
min(max(0, sbinfo.CursorPosition.Y + y_offset), sbinfo.Size.Y)
)
windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos)
def move_up(self, param):
self.move_cursor(y_offset = -to_int(param, 1))
def move_down(self, param):
self.move_cursor(y_offset = to_int(param, 1))
def move_left(self, param):
self.move_cursor(x_offset = -to_int(param, 1))
def move_right(self, param):
self.move_cursor(x_offset = to_int(param, 1))
def next_line(self, param):
sbinfo = self.screen_buffer_info()
self.move_cursor(
x_offset = -sbinfo.CursorPosition.X,
y_offset = to_int(param, 1)
)
def prev_line(self, param):
sbinfo = self.screen_buffer_info()
self.move_cursor(
x_offset = -sbinfo.CursorPosition.X,
y_offset = -to_int(param, 1)
)
escape_to_color = { (0, 30): 0x0, #black
(0, 31): 0x4, #red
(0, 32): 0x2, #green
(0, 33): 0x4+0x2, #dark yellow
(0, 34): 0x1, #blue
(0, 35): 0x1+0x4, #purple
(0, 36): 0x2+0x4, #cyan
(0, 37): 0x1+0x2+0x4, #grey
(1, 30): 0x1+0x2+0x4, #dark gray
(1, 31): 0x4+0x8, #red
(1, 32): 0x2+0x8, #light green
(1, 33): 0x4+0x2+0x8, #yellow
(1, 34): 0x1+0x8, #light blue
(1, 35): 0x1+0x4+0x8, #light purple
(1, 36): 0x1+0x2+0x8, #light cyan
(1, 37): 0x1+0x2+0x4+0x8, #white
}
def set_color(self, param):
cols = param.split(';')
attr = self.orig_sbinfo.Attributes
for c in cols:
c = to_int(c, 0)
if c in range(30,38):
attr = (attr & 0xf0) | (self.escape_to_color.get((0,c), 0x7))
elif c in range(40,48):
attr = (attr & 0x0f) | (self.escape_to_color.get((0,c), 0x7) << 8)
elif c in range(90,98):
attr = (attr & 0xf0) | (self.escape_to_color.get((1,c-60), 0x7))
elif c in range(100,108):
attr = (attr & 0x0f) | (self.escape_to_color.get((1,c-60), 0x7) << 8)
elif c == 1:
attr |= 0x08
windll.kernel32.SetConsoleTextAttribute(self.hconsole, attr)
def show_cursor(self,param):
csinfo.bVisible = 1
windll.kernel32.SetConsoleCursorInfo(self.hconsole, byref(csinfo))
def hide_cursor(self,param):
csinfo.bVisible = 0
windll.kernel32.SetConsoleCursorInfo(self.hconsole, byref(csinfo))
ansi_command_table = {
'A': move_up,
'B': move_down,
'C': move_right,
'D': move_left,
'E': next_line,
'F': prev_line,
'G': set_column,
'H': set_cursor,
'f': set_cursor,
'J': clear_screen,
'K': clear_line,
'h': show_cursor,
'l': hide_cursor,
'm': set_color,
's': push_cursor,
'u': pop_cursor,
}
# Match either the escape sequence or text not containing escape sequence
ansi_tokans = re.compile('(?:\x1b\[([0-9?;]*)([a-zA-Z])|([^\x1b]+))')
def write(self, text):
try:
wlock.acquire()
for param, cmd, txt in self.ansi_tokans.findall(text):
if cmd:
cmd_func = self.ansi_command_table.get(cmd)
if cmd_func:
cmd_func(self, param)
else:
chars_written = c_int()
if isinstance(txt, unicode):
windll.kernel32.WriteConsoleW(self.hconsole, txt, len(txt), byref(chars_written), None)
else:
windll.kernel32.WriteConsoleA(self.hconsole, txt, len(txt), byref(chars_written), None)
finally:
wlock.release()
def flush(self):
pass
def isatty(self):
return True
sys.stderr = sys.stdout = AnsiTerm()
os.environ['TERM'] = 'vt100'
ntdb-1.0/buildtools/wafadmin/pproc.py 0000664 0000000 0000000 00000051037 12241515307 0017752 0 ustar 00root root 0000000 0000000 # borrowed from python 2.5.2c1
# Copyright (c) 2003-2005 by Peter Astrand
# Licensed to PSF under a Contributor Agreement.
import sys
# true when running on a native win32 python
mswindows = (sys.platform == "win32")
import os
import types
import traceback
import gc
class CalledProcessError(Exception):
	"""Raised by check_call() when a command exits with a non-zero status."""
	def __init__(self, returncode, cmd):
		# exit status and the command that produced it
		self.returncode = returncode
		self.cmd = cmd
	def __str__(self):
		message = "Command '%s' returned non-zero exit status %d"
		return message % (self.cmd, self.returncode)
if mswindows:
	import threading
	import msvcrt
	if 0:
		# dead branch kept from the stdlib original: the pywin32-based
		# implementation, disabled in favour of the bundled _subprocess
		import pywintypes
		from win32api import GetStdHandle, STD_INPUT_HANDLE, \
			STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
		from win32api import GetCurrentProcess, DuplicateHandle, \
			GetModuleFileName, GetVersion
		from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
		from win32pipe import CreatePipe
		from win32process import CreateProcess, STARTUPINFO, \
			GetExitCodeProcess, STARTF_USESTDHANDLES, \
			STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
		from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
	else:
		from _subprocess import *
		class STARTUPINFO:
			# plain attribute bag mirroring the win32 STARTUPINFO struct
			dwFlags = 0
			hStdInput = None
			hStdOutput = None
			hStdError = None
			wShowWindow = 0
		class pywintypes:
			# stand-in so 'except pywintypes.error' works without pywin32
			error = IOError
else:
	import select
	import errno
	import fcntl
	import pickle
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"]
try:
	MAXFD = os.sysconf("SC_OPEN_MAX")
except:
	# os.sysconf may be missing or unsupported; fall back to a sane cap
	MAXFD = 256
try:
	False
except NameError:
	# pre-2.3 pythons had no bool literals
	False = 0
	True = 1
# Popen objects whose child has not been reaped yet (see _cleanup())
_active = []
def _cleanup():
	"""Drop finished Popen instances from the module-level _active list."""
	# iterate over a copy: we mutate _active while walking it
	for proc in list(_active):
		if proc.poll(_deadstate=sys.maxint) >= 0:
			try:
				_active.remove(proc)
			except ValueError:
				# already removed by a concurrent cleanup
				pass
# sentinel values for the stdin/stdout/stderr arguments of Popen
PIPE = -1
STDOUT = -2
def call(*popenargs, **kwargs):
	"""Run a command via Popen(), wait for completion, return the exit code."""
	child = Popen(*popenargs, **kwargs)
	return child.wait()
def check_call(*popenargs, **kwargs):
	"""Like call(), but raise CalledProcessError when the exit code is non-zero."""
	retcode = call(*popenargs, **kwargs)
	# recover the command for the error message (kwarg or first positional)
	cmd = kwargs.get("args")
	if cmd is None:
		cmd = popenargs[0]
	if not retcode:
		return retcode
	raise CalledProcessError(retcode, cmd)
def list2cmdline(seq):
	"""
	Translate a sequence of arguments into one command line string using
	the MS C runtime quoting rules: arguments are space-separated, an
	argument containing whitespace (or empty) is double-quoted,
	backslashes are doubled only when they precede a double quote, and a
	literal double quote becomes \\".
	"""
	parts = []
	for arg in seq:
		if parts:
			parts.append(' ')
		need_quote = (" " in arg) or ("\t" in arg) or arg == ""
		if need_quote:
			parts.append('"')
		# backslashes are deferred: their meaning depends on what follows
		pending_bs = []
		for ch in arg:
			if ch == '\\':
				pending_bs.append(ch)
			elif ch == '"':
				# double the deferred backslashes, then emit an escaped quote
				parts.append('\\' * len(pending_bs) * 2)
				pending_bs = []
				parts.append('\\"')
			else:
				if pending_bs:
					parts.extend(pending_bs)
					pending_bs = []
				parts.append(ch)
		# trailing backslashes: emitted once, and once more (doubled)
		# when the argument is quoted, since they precede the closing quote
		if pending_bs:
			parts.extend(pending_bs)
		if need_quote:
			parts.extend(pending_bs)
			parts.append('"')
	return ''.join(parts)
class Popen(object):
	"""Backport of subprocess.Popen (borrowed from python 2.5.2c1).

	Implements a win32 code path (built on the _subprocess helper module)
	and a POSIX code path (fork/exec with a pickle-over-pipe error
	report). Constructor arguments follow the stdlib subprocess.Popen
	API; only the features waf needs are kept.
	"""
	def __init__(self, args, bufsize=0, executable=None,
			stdin=None, stdout=None, stderr=None,
			preexec_fn=None, close_fds=False, shell=False,
			cwd=None, env=None, universal_newlines=False,
			startupinfo=None, creationflags=0):
		# reap previously finished children kept in _active
		_cleanup()
		self._child_created = False
		if not isinstance(bufsize, (int, long)):
			raise TypeError("bufsize must be an integer")
		if mswindows:
			# POSIX-only options
			if preexec_fn is not None:
				raise ValueError("preexec_fn is not supported on Windows platforms")
			if close_fds:
				raise ValueError("close_fds is not supported on Windows platforms")
		else:
			# win32-only options
			if startupinfo is not None:
				raise ValueError("startupinfo is only supported on Windows platforms")
			if creationflags != 0:
				raise ValueError("creationflags is only supported on Windows platforms")
		self.stdin = None
		self.stdout = None
		self.stderr = None
		self.pid = None
		self.returncode = None
		self.universal_newlines = universal_newlines
		# child ends: p2cread, c2pwrite, errwrite; parent ends: the rest
		(p2cread, p2cwrite,
		 c2pread, c2pwrite,
		 errread, errwrite) = self._get_handles(stdin, stdout, stderr)
		self._execute_child(args, executable, preexec_fn, close_fds,
				cwd, env, universal_newlines,
				startupinfo, creationflags, shell,
				p2cread, p2cwrite,
				c2pread, c2pwrite,
				errread, errwrite)
		if mswindows:
			# on win32, close the parent fds of un-redirected streams that
			# were duplicated from the std handles in _get_handles()
			if stdin is None and p2cwrite is not None:
				os.close(p2cwrite)
				p2cwrite = None
			if stdout is None and c2pread is not None:
				os.close(c2pread)
				c2pread = None
			if stderr is None and errread is not None:
				os.close(errread)
				errread = None
		# wrap the parent pipe ends in file objects
		if p2cwrite:
			self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
		if c2pread:
			if universal_newlines:
				self.stdout = os.fdopen(c2pread, 'rU', bufsize)
			else:
				self.stdout = os.fdopen(c2pread, 'rb', bufsize)
		if errread:
			if universal_newlines:
				self.stderr = os.fdopen(errread, 'rU', bufsize)
			else:
				self.stderr = os.fdopen(errread, 'rb', bufsize)

	def _translate_newlines(self, data):
		# universal newlines: fold \r\n and \r into \n
		data = data.replace("\r\n", "\n")
		data = data.replace("\r", "\n")
		return data

	def __del__(self, sys=sys):
		# 'sys' is bound as a default argument so it is still reachable
		# during interpreter shutdown
		if not self._child_created:
			return
		self.poll(_deadstate=sys.maxint)
		if self.returncode is None and _active is not None:
			# child still running: park it for later _cleanup() reaping
			_active.append(self)

	def communicate(self, input=None):
		# Returns (stdout, stderr); each is None when the corresponding
		# stream was not redirected to a pipe.
		if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
			# at most one pipe in play: plain blocking I/O cannot deadlock
			stdout = None
			stderr = None
			if self.stdin:
				if input:
					self.stdin.write(input)
				self.stdin.close()
			elif self.stdout:
				stdout = self.stdout.read()
			elif self.stderr:
				stderr = self.stderr.read()
			self.wait()
			return (stdout, stderr)
		return self._communicate(input)

	if mswindows:
		#
		# Windows implementation
		#
		def _get_handles(self, stdin, stdout, stderr):
			# construct and return (p2cread, p2cwrite, c2pread, c2pwrite,
			# errread, errwrite)
			if stdin is None and stdout is None and stderr is None:
				return (None, None, None, None, None, None)
			p2cread, p2cwrite = None, None
			c2pread, c2pwrite = None, None
			errread, errwrite = None, None
			if stdin is None:
				p2cread = GetStdHandle(STD_INPUT_HANDLE)
			if p2cread is not None:
				pass
			elif stdin is None or stdin == PIPE:
				p2cread, p2cwrite = CreatePipe(None, 0)
				# detach from the win32 object and expose a C runtime fd
				p2cwrite = p2cwrite.Detach()
				p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
			elif isinstance(stdin, int):
				p2cread = msvcrt.get_osfhandle(stdin)
			else:
				# assume a file-like object
				p2cread = msvcrt.get_osfhandle(stdin.fileno())
			p2cread = self._make_inheritable(p2cread)
			if stdout is None:
				c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
			if c2pwrite is not None:
				pass
			elif stdout is None or stdout == PIPE:
				c2pread, c2pwrite = CreatePipe(None, 0)
				c2pread = c2pread.Detach()
				c2pread = msvcrt.open_osfhandle(c2pread, 0)
			elif isinstance(stdout, int):
				c2pwrite = msvcrt.get_osfhandle(stdout)
			else:
				c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
			c2pwrite = self._make_inheritable(c2pwrite)
			if stderr is None:
				errwrite = GetStdHandle(STD_ERROR_HANDLE)
			if errwrite is not None:
				pass
			elif stderr is None or stderr == PIPE:
				errread, errwrite = CreatePipe(None, 0)
				errread = errread.Detach()
				errread = msvcrt.open_osfhandle(errread, 0)
			elif stderr == STDOUT:
				errwrite = c2pwrite
			elif isinstance(stderr, int):
				errwrite = msvcrt.get_osfhandle(stderr)
			else:
				errwrite = msvcrt.get_osfhandle(stderr.fileno())
			errwrite = self._make_inheritable(errwrite)
			return (p2cread, p2cwrite,
				c2pread, c2pwrite,
				errread, errwrite)

		def _make_inheritable(self, handle):
			# duplicate a handle so the child process can inherit it
			return DuplicateHandle(GetCurrentProcess(), handle, GetCurrentProcess(), 0, 1, DUPLICATE_SAME_ACCESS)

		def _find_w9xpopen(self):
			# helper binary needed to run console programs on win9x
			w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)), "w9xpopen.exe")
			if not os.path.exists(w9xpopen):
				w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix), "w9xpopen.exe")
				if not os.path.exists(w9xpopen):
					raise RuntimeError("Cannot locate w9xpopen.exe, which is needed for Popen to work with your shell or platform.")
			return w9xpopen

		def _execute_child(self, args, executable, preexec_fn, close_fds,
				cwd, env, universal_newlines,
				startupinfo, creationflags, shell,
				p2cread, p2cwrite,
				c2pread, c2pwrite,
				errread, errwrite):
			"""Execute program (win32 version)."""
			if not isinstance(args, types.StringTypes):
				args = list2cmdline(args)
			if startupinfo is None:
				startupinfo = STARTUPINFO()
			if None not in (p2cread, c2pwrite, errwrite):
				startupinfo.dwFlags |= STARTF_USESTDHANDLES
				startupinfo.hStdInput = p2cread
				startupinfo.hStdOutput = c2pwrite
				startupinfo.hStdError = errwrite
			if shell:
				startupinfo.dwFlags |= STARTF_USESHOWWINDOW
				startupinfo.wShowWindow = SW_HIDE
				comspec = os.environ.get("COMSPEC", "cmd.exe")
				args = comspec + " /c " + args
				# win9x or command.com must be launched through w9xpopen
				if (GetVersion() >= 0x80000000L or
						os.path.basename(comspec).lower() == "command.com"):
					w9xpopen = self._find_w9xpopen()
					args = '"%s" %s' % (w9xpopen, args)
					creationflags |= CREATE_NEW_CONSOLE
			try:
				hp, ht, pid, tid = CreateProcess(executable, args, None, None, 1, creationflags, env, cwd, startupinfo)
			except pywintypes.error, e:
				# translate pywintypes.error into WindowsError
				raise WindowsError(*e.args)
			# retain the process handle, but close the thread handle
			self._child_created = True
			self._handle = hp
			self.pid = pid
			ht.Close()
			# close the pipe ends that only the child should keep open
			if p2cread is not None:
				p2cread.Close()
			if c2pwrite is not None:
				c2pwrite.Close()
			if errwrite is not None:
				errwrite.Close()

		def poll(self, _deadstate=None):
			# non-blocking check of the exit status
			if self.returncode is None:
				if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
					self.returncode = GetExitCodeProcess(self._handle)
			return self.returncode

		def wait(self):
			# block until the child terminates; return the exit code
			if self.returncode is None:
				obj = WaitForSingleObject(self._handle, INFINITE)
				self.returncode = GetExitCodeProcess(self._handle)
			return self.returncode

		def _readerthread(self, fh, buffer):
			buffer.append(fh.read())

		def _communicate(self, input):
			# read both pipes from helper threads to avoid deadlocks
			stdout = None # return value
			stderr = None # return value
			if self.stdout:
				stdout = []
				stdout_thread = threading.Thread(target=self._readerthread, args=(self.stdout, stdout))
				stdout_thread.setDaemon(True)
				stdout_thread.start()
			if self.stderr:
				stderr = []
				stderr_thread = threading.Thread(target=self._readerthread, args=(self.stderr, stderr))
				stderr_thread.setDaemon(True)
				stderr_thread.start()
			if self.stdin:
				if input is not None:
					self.stdin.write(input)
				self.stdin.close()
			if self.stdout:
				stdout_thread.join()
			if self.stderr:
				stderr_thread.join()
			# all data exchanged: translate the single-element lists
			if stdout is not None:
				stdout = stdout[0]
			if stderr is not None:
				stderr = stderr[0]
			if self.universal_newlines and hasattr(file, 'newlines'):
				if stdout:
					stdout = self._translate_newlines(stdout)
				if stderr:
					stderr = self._translate_newlines(stderr)
			self.wait()
			return (stdout, stderr)

	else:
		#
		# POSIX implementation
		#
		def _get_handles(self, stdin, stdout, stderr):
			# construct and return (p2cread, p2cwrite, c2pread, c2pwrite,
			# errread, errwrite)
			p2cread, p2cwrite = None, None
			c2pread, c2pwrite = None, None
			errread, errwrite = None, None
			if stdin is None:
				pass
			elif stdin == PIPE:
				p2cread, p2cwrite = os.pipe()
			elif isinstance(stdin, int):
				p2cread = stdin
			else:
				# assume a file-like object
				p2cread = stdin.fileno()
			if stdout is None:
				pass
			elif stdout == PIPE:
				c2pread, c2pwrite = os.pipe()
			elif isinstance(stdout, int):
				c2pwrite = stdout
			else:
				c2pwrite = stdout.fileno()
			if stderr is None:
				pass
			elif stderr == PIPE:
				errread, errwrite = os.pipe()
			elif stderr == STDOUT:
				errwrite = c2pwrite
			elif isinstance(stderr, int):
				errwrite = stderr
			else:
				errwrite = stderr.fileno()
			return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite)

		def _set_cloexec_flag(self, fd):
			try:
				cloexec_flag = fcntl.FD_CLOEXEC
			except AttributeError:
				cloexec_flag = 1
			old = fcntl.fcntl(fd, fcntl.F_GETFD)
			fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)

		def _close_fds(self, but):
			# close every fd above 2 except 'but' (the error pipe)
			for i in xrange(3, MAXFD):
				if i == but:
					continue
				try:
					os.close(i)
				except:
					pass

		def _execute_child(self, args, executable, preexec_fn, close_fds,
				cwd, env, universal_newlines, startupinfo, creationflags, shell,
				p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite):
			"""Execute program (POSIX version)."""
			if isinstance(args, types.StringTypes):
				args = [args]
			else:
				args = list(args)
			if shell:
				args = ["/bin/sh", "-c"] + args
			if executable is None:
				executable = args[0]
			# exceptions raised in the child are pickled through this pipe
			errpipe_read, errpipe_write = os.pipe()
			self._set_cloexec_flag(errpipe_write)
			# disable gc around fork() so finalizers do not run in the child
			gc_was_enabled = gc.isenabled()
			gc.disable()
			try:
				self.pid = os.fork()
			except:
				if gc_was_enabled:
					gc.enable()
				raise
			self._child_created = True
			if self.pid == 0:
				# --- child ---
				try:
					# close the parent's pipe ends
					if p2cwrite:
						os.close(p2cwrite)
					if c2pread:
						os.close(c2pread)
					if errread:
						os.close(errread)
					os.close(errpipe_read)
					# dup the child ends onto the standard fds
					if p2cread:
						os.dup2(p2cread, 0)
					if c2pwrite:
						os.dup2(c2pwrite, 1)
					if errwrite:
						os.dup2(errwrite, 2)
					# close the originals, avoiding double-closes and std fds
					if p2cread and p2cread not in (0,):
						os.close(p2cread)
					if c2pwrite and c2pwrite not in (p2cread, 1):
						os.close(c2pwrite)
					if errwrite and errwrite not in (p2cread, c2pwrite, 2):
						os.close(errwrite)
					if close_fds:
						self._close_fds(but=errpipe_write)
					if cwd is not None:
						os.chdir(cwd)
					if preexec_fn:
						apply(preexec_fn)
					if env is None:
						os.execvp(executable, args)
					else:
						os.execvpe(executable, args, env)
				except:
					# report the failure to the parent through the pipe
					exc_type, exc_value, tb = sys.exc_info()
					exc_lines = traceback.format_exception(exc_type, exc_value, tb)
					exc_value.child_traceback = ''.join(exc_lines)
					os.write(errpipe_write, pickle.dumps(exc_value))
				# this exit code is never reported to applications
				os._exit(255)
			# --- parent ---
			if gc_was_enabled:
				gc.enable()
			os.close(errpipe_write)
			# close the child's ends of the pipes
			if p2cread and p2cwrite:
				os.close(p2cread)
			if c2pwrite and c2pread:
				os.close(c2pwrite)
			if errwrite and errread:
				os.close(errwrite)
			# wait for exec to fail or succeed; exceptions limited to 1MB
			data = os.read(errpipe_read, 1048576)
			os.close(errpipe_read)
			if data != "":
				# exec failed: reap the child and re-raise its exception
				os.waitpid(self.pid, 0)
				child_exception = pickle.loads(data)
				raise child_exception

		def _handle_exitstatus(self, sts):
			# decode the status word returned by waitpid()
			if os.WIFSIGNALED(sts):
				self.returncode = -os.WTERMSIG(sts)
			elif os.WIFEXITED(sts):
				self.returncode = os.WEXITSTATUS(sts)
			else:
				# should never happen
				raise RuntimeError("Unknown child exit status!")

		def poll(self, _deadstate=None):
			# non-blocking check of the exit status
			if self.returncode is None:
				try:
					pid, sts = os.waitpid(self.pid, os.WNOHANG)
					if pid == self.pid:
						self._handle_exitstatus(sts)
				except os.error:
					# child already reaped elsewhere; report _deadstate
					if _deadstate is not None:
						self.returncode = _deadstate
			return self.returncode

		def wait(self):
			# block until the child terminates; return the exit code
			if self.returncode is None:
				pid, sts = os.waitpid(self.pid, 0)
				self._handle_exitstatus(sts)
			return self.returncode

		def _communicate(self, input):
			# multiplex the three pipes with select() to avoid deadlocks
			read_set = []
			write_set = []
			stdout = None # return value
			stderr = None # return value
			if self.stdin:
				# flush the stdio buffer before switching to raw os.write
				self.stdin.flush()
				if input:
					write_set.append(self.stdin)
				else:
					self.stdin.close()
			if self.stdout:
				read_set.append(self.stdout)
				stdout = []
			if self.stderr:
				read_set.append(self.stderr)
				stderr = []
			input_offset = 0
			while read_set or write_set:
				rlist, wlist, xlist = select.select(read_set, write_set, [])
				if self.stdin in wlist:
					# write in small chunks: select() only guarantees a
					# small write will not block
					bytes_written = os.write(self.stdin.fileno(), buffer(input, input_offset, 512))
					input_offset += bytes_written
					if input_offset >= len(input):
						self.stdin.close()
						write_set.remove(self.stdin)
				if self.stdout in rlist:
					data = os.read(self.stdout.fileno(), 1024)
					if data == "":
						self.stdout.close()
						read_set.remove(self.stdout)
					stdout.append(data)
				if self.stderr in rlist:
					data = os.read(self.stderr.fileno(), 1024)
					if data == "":
						self.stderr.close()
						read_set.remove(self.stderr)
					stderr.append(data)
			# all data exchanged: translate the chunk lists into strings
			if stdout is not None:
				stdout = ''.join(stdout)
			if stderr is not None:
				stderr = ''.join(stderr)
			# newline translation cannot be left to the file objects:
			# stdio buffering does not combine with select()
			if self.universal_newlines and hasattr(file, 'newlines'):
				if stdout:
					stdout = self._translate_newlines(stdout)
				if stderr:
					stderr = self._translate_newlines(stderr)
			self.wait()
			return (stdout, stderr)
ntdb-1.0/buildtools/wafadmin/py3kfixes.py 0000664 0000000 0000000 00000007445 12241515307 0020560 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2009 (ita)
"""
Fixes for py3k go here
"""
import os
all_modifs = {}
def modif(dir, name, fun):
	"""Apply the text transformation fun to the file dir/name.

	name == '*' expands to every .py file directly under dir, dir/Tools
	and dir/3rdparty, and recurses once per file.
	"""
	if name == '*':
		lst = []
		for y in '. Tools 3rdparty'.split():
			for x in os.listdir(os.path.join(dir, y)):
				if x.endswith('.py'):
					lst.append(y + os.sep + x)
		for x in lst:
			modif(dir, x, fun)
		return

	filename = os.path.join(dir, name)
	# read, transform, rewrite in place; try/finally guarantees the
	# handles are closed even when fun() raises
	f = open(filename, 'r')
	try:
		txt = f.read()
	finally:
		f.close()
	txt = fun(txt)
	f = open(filename, 'w')
	try:
		f.write(txt)
	finally:
		f.close()
def subst(filename):
	"""Return a decorator registering a function as a py3k fix for filename.

	Registered functions are collected in the module-level all_modifs
	dict (filename -> list of transformation callables) consumed by
	fixdir().
	"""
	def do_subst(fun):
		global all_modifs
		try:
			# bug fix: the value is a list of callables, so append the
			# function; the original 'list += fun' raises TypeError as a
			# function is not iterable
			all_modifs[filename].append(fun)
		except KeyError:
			all_modifs[filename] = [fun]
		return fun
	return do_subst
@subst('Constants.py')
def r1(code):
	"""The hash seed must be bytes under py3k; bump the ABI to mark it."""
	for old, new in (
		("'iluvcuteoverload'", "b'iluvcuteoverload'"),
		("ABI=7", "ABI=37"),
	):
		code = code.replace(old, new)
	return code
@subst('Tools/ccroot.py')
def r2(code):
	"""Write bytes to the child's stdin and decode its output under py3k."""
	for old, new in (
		("p.stdin.write('\\n')", "p.stdin.write(b'\\n')"),
		('p.communicate()[0]', 'p.communicate()[0].decode("utf-8")'),
	):
		code = code.replace(old, new)
	return code
@subst('Utils.py')
def r3(code):
	"""Hash bytes, not str, and decode subprocess output under py3k."""
	for old, new in (
		("m.update(str(lst))", "m.update(str(lst).encode())"),
		('p.communicate()[0]', 'p.communicate()[0].decode("utf-8")'),
	):
		code = code.replace(old, new)
	return code
@subst('ansiterm.py')
def r33(code):
	"""py3k has no 'unicode' builtin; use 'str' instead."""
	return code.replace('unicode', 'str')
@subst('Task.py')
def r4(code):
	"""py3k fixes for Task.py: byte hashing, metaclass syntax, hexlify."""
	for old, new in (
		("up(self.__class__.__name__)", "up(self.__class__.__name__.encode())"),
		("up(self.env.variant())", "up(self.env.variant().encode())"),
		("up(x.parent.abspath())", "up(x.parent.abspath().encode())"),
		("up(x.name)", "up(x.name.encode())"),
		('class TaskBase(object):\n\t__metaclass__=store_task_type', 'import binascii\n\nclass TaskBase(object, metaclass=store_task_type):'),
		('keys=self.cstr_groups.keys()', 'keys=list(self.cstr_groups.keys())'),
		("sig.encode('hex')", 'binascii.hexlify(sig)'),
		("os.path.join(Options.cache_global,ssig)", "os.path.join(Options.cache_global,ssig.decode())"),
	):
		code = code.replace(old, new)
	return code
@subst('Build.py')
def r5(code):
	"""py3k fixes for Build.py: default pickle protocol, list() the view."""
	for old, new in (
		("cPickle.dump(data,file,-1)", "cPickle.dump(data,file)"),
		('for node in src_dir_node.childs.values():', 'for node in list(src_dir_node.childs.values()):'),
	):
		code = code.replace(old, new)
	return code
@subst('*')
def r6(code):
	"""Blanket py2->py3 renames applied to every wafadmin module."""
	for old, new in (
		('xrange', 'range'),
		('iteritems', 'items'),
		('maxint', 'maxsize'),
		('iterkeys', 'keys'),
		('Error,e:', 'Error as e:'),
		('Exception,e:', 'Exception as e:'),
	):
		code = code.replace(old, new)
	return code
@subst('TaskGen.py')
def r7(code):
	"""Use the py3k metaclass syntax for task_gen."""
	return code.replace('class task_gen(object):\n\t__metaclass__=register_obj', 'class task_gen(object, metaclass=register_obj):')
@subst('Tools/python.py')
def r8(code):
	"""Decode the subprocess output (bytes under py3k)."""
	return code.replace('proc.communicate()[0]', 'proc.communicate()[0].decode("utf-8")')
@subst('Tools/glib2.py')
def r9(code):
	"""Encode text before writing it to a binary stream under py3k."""
	return code.replace('f.write(c)', 'f.write(c.encode("utf-8"))')
@subst('Tools/config_c.py')
def r10(code):
	"""Decode process output and guard the success-key decoding."""
	for old, new in (
		("key=kw['success']", "key=kw['success']\n\t\t\t\ttry:\n\t\t\t\t\tkey=key.decode('utf-8')\n\t\t\t\texcept:\n\t\t\t\t\tpass"),
		('out=str(out)', 'out=out.decode("utf-8")'),
		('err=str(err)', 'err=err.decode("utf-8")'),
	):
		code = code.replace(old, new)
	return code
@subst('Tools/d.py')
def r11(code):
	"""Decode the stripped command output (bytes under py3k)."""
	return code.replace('ret.strip()', 'ret.strip().decode("utf-8")')
def fixdir(dir):
	"""Apply every registered substitution to the wafadmin tree under dir."""
	global all_modifs
	for filename in all_modifs:
		for fun in all_modifs[filename]:
			modif(os.path.join(dir, 'wafadmin'), filename, fun)
ntdb-1.0/buildtools/wafsamba/ 0000775 0000000 0000000 00000000000 12241515307 0016242 5 ustar 00root root 0000000 0000000 ntdb-1.0/buildtools/wafsamba/README 0000664 0000000 0000000 00000000377 12241515307 0017131 0 ustar 00root root 0000000 0000000 This is a set of waf 'tools' to help make building the Samba
components easier, by having common functions in one place. This gives
us a more consistent build, and ensures that our project rules are
obeyed
TODO:
see http://wiki.samba.org/index.php/Waf
ntdb-1.0/buildtools/wafsamba/__init__.py 0000664 0000000 0000000 00000000000 12241515307 0020341 0 ustar 00root root 0000000 0000000 ntdb-1.0/buildtools/wafsamba/configure_file.py 0000664 0000000 0000000 00000002440 12241515307 0021574 0 ustar 00root root 0000000 0000000 # handle substitution of variables in .in files
import Build, sys, Logs
from samba_utils import *
def subst_at_vars(task):
	'''substitute @VAR@ style variables in a file'''
	# 're' previously arrived only via the wildcard samba_utils import;
	# import it explicitly so this function is self-contained
	import re
	env = task.env
	src = task.inputs[0].srcpath(env)
	tgt = task.outputs[0].bldpath(env)
	f = open(src, 'r')
	try:
		s = f.read()
	finally:
		f.close()
	# split on the vars; the capturing group keeps the @VAR@ tokens
	a = re.split('(@\w+@)', s)
	out = []
	for v in a:
		if re.match('@\w+@', v):
			vname = v[1:-1]
			# accept a lowercase @var@ when only VAR is defined
			if vname not in task.env and vname.upper() in task.env:
				vname = vname.upper()
			if vname not in task.env:
				Logs.error("Unknown substitution %s in %s" % (v, task.name))
				sys.exit(1)
			v = SUBST_VARS_RECURSIVE(task.env[vname], task.env)
		out.append(v)
	contents = ''.join(out)
	f = open(tgt, 'w')
	try:
		f.write(contents)
	finally:
		f.close()
	return 0
def CONFIGURE_FILE(bld, in_file, **kwargs):
	'''configure file'''
	# generate FILE from FILE.in by expanding @VAR@ markers; the extra
	# keyword arguments become the substitution variables
	base = os.path.basename(in_file)
	bld.SAMBA_GENERATOR('INFILE_%s' % base,
	                    rule=subst_at_vars,
	                    source=in_file + '.in',
	                    target=in_file,
	                    vars=kwargs)
Build.BuildContext.CONFIGURE_FILE = CONFIGURE_FILE
ntdb-1.0/buildtools/wafsamba/gccdeps.py 0000664 0000000 0000000 00000006426 12241515307 0020234 0 ustar 00root root 0000000 0000000 # encoding: utf-8
# Thomas Nagy, 2008-2010 (ita)
"""
Execute the tasks with gcc -MD, read the dependencies from the .d file
and prepare the dependency calculation for the next run
"""
import os, re, threading
import Task, Logs, Utils, preproc
from TaskGen import before, after, feature
# serializes node-tree lookups done from task threads (see post_run)
lock = threading.Lock()
# gcc flag that makes the compiler emit a .d dependency file
preprocessor_flag = '-MD'
@feature('cc')
@before('apply_core')
def add_mmd_cc(self):
	"""Make sure the C compiler emits .d dependency files (-MD)."""
	current = self.env.get_flat('CCFLAGS')
	if current.find(preprocessor_flag) < 0:
		self.env.append_value('CCFLAGS', preprocessor_flag)
@feature('cxx')
@before('apply_core')
def add_mmd_cxx(self):
	"""Make sure the C++ compiler emits .d dependency files (-MD)."""
	current = self.env.get_flat('CXXFLAGS')
	if current.find(preprocessor_flag) < 0:
		self.env.append_value('CXXFLAGS', preprocessor_flag)
def scan(self):
	"the scanner does not do anything initially"
	# real dependencies come from the .d files parsed in post_run();
	# just report whatever was stored for this task on a previous build
	deps = self.generator.bld.node_deps.get(self.unique_id(), [])
	return (deps, [])
# matches the .o suffix of an object path
re_o = re.compile("\.o$")
# matches a path starting with '..' (a reference back into the source dir)
re_src = re.compile("^(\.\.)[\\/](.*)$")
def post_run(self):
	# The following code is executed by threads, it is not safe, so a lock is needed...
	if getattr(self, 'cached', None):
		return Task.Task.post_run(self)
	# derive the .d file path from the .o output and read it
	name = self.outputs[0].abspath(self.env)
	name = re_o.sub('.d', name)
	txt = Utils.readf(name)
	#os.unlink(name)
	# join lines broken by backslash-newline, then drop the make target
	# before the ':' and split the remainder into dependency paths
	txt = txt.replace('\\\n', '')
	lst = txt.strip().split(':')
	val = ":".join(lst[1:])
	val = val.split()
	nodes = []
	bld = self.generator.bld
	# matches paths relative to the build variant dir or the source dir
	f = re.compile("^("+self.env.variant()+"|\.\.)[\\/](.*)$")
	for x in val:
		if os.path.isabs(x):
			if not preproc.go_absolute:
				continue
			lock.acquire()
			try:
				node = bld.root.find_resource(x)
			finally:
				lock.release()
		else:
			g = re.search(re_src, x)
			if g:
				# '../foo' -> resolve against the parent of the build dir
				x = g.group(2)
				lock.acquire()
				try:
					node = bld.bldnode.parent.find_resource(x)
				finally:
					lock.release()
			else:
				# otherwise strip the variant prefix (when present) and
				# resolve against the source tree
				g = re.search(f, x)
				if g:
					x = g.group(2)
				lock.acquire()
				try:
					node = bld.srcnode.find_resource(x)
				finally:
					lock.release()
		if id(node) == id(self.inputs[0]):
			# ignore the source file, it is already in the dependencies
			# this way, successful config tests may be retrieved from the cache
			continue
		if not node:
			raise ValueError('could not find %r for %r' % (x, self))
		else:
			nodes.append(node)
	Logs.debug('deps: real scanner for %s returned %s' % (str(self), str(nodes)))
	# store the resolved nodes for the next build's scan()/signature
	bld.node_deps[self.unique_id()] = nodes
	bld.raw_deps[self.unique_id()] = []
	try:
		del self.cache_sig
	except:
		pass
	Task.Task.post_run(self)
import Constants, Utils
def sig_implicit_deps(self):
	# a missing/renamed dependency is not fatal: fall back to a nil
	# signature so the task is simply considered out of date
	try:
		return Task.Task.sig_implicit_deps(self)
	except Utils.WafError:
		return Constants.SIG_NIL
# graft the .d-file based scanner onto the cc and cxx task classes
for name in 'cc cxx'.split():
	try:
		cls = Task.TaskBase.classes[name]
	except KeyError:
		pass
	else:
		cls.post_run = post_run
		cls.scan = scan
		cls.sig_implicit_deps = sig_implicit_deps
ntdb-1.0/buildtools/wafsamba/generic_cc.py 0000664 0000000 0000000 00000003427 12241515307 0020703 0 ustar 00root root 0000000 0000000
# compiler definition for a generic C compiler
# based on suncc.py from waf
import os, optparse
import Utils, Options, Configure
import ccroot, ar
from Configure import conftest
from compiler_cc import c_compiler
# try gcc first on these platforms, then fall back to the generic driver
c_compiler['default'] = ['gcc', 'generic_cc']
c_compiler['hpux'] = ['gcc', 'generic_cc']
@conftest
def find_generic_cc(conf):
	"""Locate a C compiler ($CC from env/config, else 'cc' on PATH)."""
	v = conf.env
	cc = v['CC'] or conf.environ.get('CC')
	if not cc:
		cc = conf.find_program('cc', var='CC')
	if not cc:
		conf.fatal('generic_cc was not found')
	v['CC'] = conf.cmd_to_list(cc)
	v['CC_NAME'] = 'generic'
@conftest
def generic_cc_common_flags(conf):
	"""Fill in the flag templates shared by POSIX-style C compiler drivers."""
	v = conf.env
	settings = {
		'CC_SRC_F': '',
		'CC_TGT_F': ['-c', '-o', ''],
		'CPPPATH_ST': '-I%s',          # template for adding include paths
		'CCLNK_SRC_F': '',
		'CCLNK_TGT_F': ['-o', ''],
		'LIB_ST': '-l%s',              # template for adding libs
		'LIBPATH_ST': '-L%s',          # template for adding libpaths
		'STATICLIB_ST': '-l%s',
		'STATICLIBPATH_ST': '-L%s',
		'CCDEFINES_ST': '-D%s',
		'program_PATTERN': '%s',
		'shlib_PATTERN': 'lib%s.so',
	}
	for key, value in settings.items():
		v[key] = value
	# the linker defaults to the compiler driver itself
	if not v['LINK_CC']:
		v['LINK_CC'] = v['CC']
detect = '''
find_generic_cc
find_cpp
find_ar
generic_cc_common_flags
cc_load_tools
cc_add_flags
link_add_flags
'''
ntdb-1.0/buildtools/wafsamba/hpuxcc.py 0000664 0000000 0000000 00000003130 12241515307 0020103 0 ustar 00root root 0000000 0000000 # compiler definition for HPUX
# based on suncc.py from waf
import os, optparse, sys
import Utils, Options, Configure
import ccroot, ar
from Configure import conftest
import gcc
@conftest
def gcc_modifier_hpux(conf):
    '''adjust the stock gcc tool settings for HP-UX targets'''
    env = conf.env
    env['CCFLAGS_DEBUG'] = ['-g']
    env['CCFLAGS_RELEASE'] = ['-O2']
    env['CC_SRC_F'] = ''
    env['CC_TGT_F'] = ['-c', '-o', '']
    env['CPPPATH_ST'] = '-I%s'
    if not env['LINK_CC']:
        env['LINK_CC'] = env['CC']
    env['CCLNK_SRC_F'] = ''
    env['CCLNK_TGT_F'] = ['-o', '']
    env['LIB_ST'] = '-l%s'
    env['LIBPATH_ST'] = '-L%s'
    env['STATICLIB_ST'] = '-l%s'
    env['STATICLIBPATH_ST'] = '-L%s'
    env['RPATH_ST'] = '-Wl,-rpath,%s'
    env['CCDEFINES_ST'] = '-D%s'
    env['SONAME_ST'] = '-Wl,-h,%s'
    env['SHLIB_MARKER'] = []
    env['FULLSTATIC_MARKER'] = '-static'
    env['program_PATTERN'] = '%s'
    env['shlib_CCFLAGS'] = ['-fPIC', '-DPIC']
    env['shlib_LINKFLAGS'] = ['-shared']
    # HP-UX names shared libraries lib*.sl rather than lib*.so
    env['shlib_PATTERN'] = 'lib%s.sl'
    env['staticlib_PATTERN'] = 'lib%s.a'

# install our version over the hook the stock gcc tool would use
gcc.gcc_modifier_hpux = gcc_modifier_hpux
from TaskGen import feature, after
@feature('cprogram', 'cshlib')
@after('apply_link', 'apply_lib_vars', 'apply_obj_vars')
def hpux_addfullpath(self):
# Rewrite relative -L link flags into absolute paths on HP-UX 11.
# NOTE(review): presumably the HP-UX linker mishandles relative -L
# entries here -- confirm against the original commit rationale.
if sys.platform == 'hp-ux11':
link = getattr(self, 'link_task', None)
if link:
lst = link.env.LINKFLAGS
buf = []
for x in lst:
if x.startswith('-L'):
p2 = x[2:]
if not os.path.isabs(p2):
# anchor the path at the source root; lstrip('.') drops ALL
# leading dots, so this assumes paths start with './' or '../'
# -- TODO confirm no -L path begins with a dotted directory name
x = x[:2] + self.bld.srcnode.abspath(link.env) + "/../" + x[2:].lstrip('.')
buf.append(x)
link.env.LINKFLAGS = buf
ntdb-1.0/buildtools/wafsamba/irixcc.py 0000664 0000000 0000000 00000003721 12241515307 0020100 0 ustar 00root root 0000000 0000000
# compiler definition for irix/MIPSpro cc compiler
# based on suncc.py from waf
import os, optparse
import Utils, Options, Configure
import ccroot, ar
from Configure import conftest
from compiler_cc import c_compiler
c_compiler['irix'] = ['gcc', 'irixcc']
@conftest
def find_irixcc(conf):
# Locate the IRIX MIPSpro cc: honour env CC first, then search PATH.
v = conf.env
cc = None
if v['CC']: cc = v['CC']
elif 'CC' in conf.environ: cc = conf.environ['CC']
if not cc: cc = conf.find_program('cc', var='CC')
if not cc: conf.fatal('irixcc was not found')
cc = conf.cmd_to_list(cc)
try:
# Sanity check: any stdout from 'cc -c99 -version' is treated as failure.
# NOTE(review): the error text below says '-v' but '-version' is used;
# presumably MIPSpro prints its banner to stderr -- confirm on IRIX.
if Utils.cmd_output(cc + ['-c99'] + ['-version']) != '':
conf.fatal('irixcc %r was not found' % cc)
except ValueError:
conf.fatal('irixcc -v could not be executed')
# MIPSpro needs -c99 for C99 sources, so make it a permanent CCFLAG
conf.env.append_unique('CCFLAGS', '-c99')
v['CC'] = cc
v['CC_NAME'] = 'irix'
@conftest
def irixcc_common_flags(conf):
    '''fill in the flag templates for the IRIX MIPSpro cc'''
    env = conf.env

    # compile step
    env['CC_SRC_F'] = ''
    env['CC_TGT_F'] = ['-c', '-o', '']
    env['CPPPATH_ST'] = '-I%s'              # template for adding include paths

    # link step: the linker defaults to the compiler driver
    if not env['LINK_CC']:
        env['LINK_CC'] = env['CC']
    env['CCLNK_SRC_F'] = ''
    env['CCLNK_TGT_F'] = ['-o', '']
    env['LIB_ST'] = '-l%s'                  # template for adding libs
    env['LIBPATH_ST'] = '-L%s'              # template for adding libpaths
    env['STATICLIB_ST'] = '-l%s'
    env['STATICLIBPATH_ST'] = '-L%s'
    env['CCDEFINES_ST'] = '-D%s'
    # SONAME_ST / SHLIB_MARKER / STATICLIB_MARKER intentionally unset

    # output name patterns
    env['program_PATTERN'] = '%s'
    env['shlib_PATTERN'] = 'lib%s.so'
    # shlib/staticlib CCFLAGS and LINKFLAGS left to defaults

# ordered list of conftest steps waf runs for this tool
detect = '''
find_irixcc
find_cpp
find_ar
irixcc_common_flags
cc_load_tools
cc_add_flags
link_add_flags
'''
ntdb-1.0/buildtools/wafsamba/nothreads.py 0000664 0000000 0000000 00000014456 12241515307 0020615 0 ustar 00root root 0000000 0000000 # encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
# this replaces the core of Runner.py in waf with a varient that works
# on systems with completely broken threading (such as Python 2.5.x on
# AIX). For simplicity we enable this when JOBS=1, which is triggered
# by the compatibility makefile used for the waf build. That also ensures
# this code is tested, as it means it is used in the build farm, and by
# anyone using 'make' to build Samba with waf
"Execute the tasks"
import sys, random, time, threading, traceback, os
try: from Queue import Queue
except ImportError: from queue import Queue
import Build, Utils, Logs, Options
from Logs import debug, error
from Constants import *
GAP = 15
run_old = threading.Thread.run

def run(*args, **kwargs):
    """Replacement for threading.Thread.run that routes uncaught
    exceptions through sys.excepthook instead of the default thread
    error report; KeyboardInterrupt and SystemExit still propagate."""
    try:
        run_old(*args, **kwargs)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        sys.excepthook(*sys.exc_info())

# monkey-patch every Thread created from here on
threading.Thread.run = run
class TaskConsumer(object):
    # single-threaded stand-in for Runner.TaskConsumer: only one consumer
    # ever exists because tasks are executed inline by process()
    consumers = 1

def process(tsk):
    """Run one task synchronously and report its result.

    Mirrors the consumer-thread body of waf's Runner.py: the task is
    executed inline, its hasrun state set to SUCCESS/CRASHED/EXCEPTION,
    and it is handed back to its master Parallel object via m.out.
    """
    m = tsk.master
    if m.stop:
        # the build is aborting: return the task untouched
        m.out.put(tsk)
        return
    try:
        tsk.generator.bld.printout(tsk.display())
        if tsk.__class__.stat:
            ret = tsk.__class__.stat(tsk)
        # actual call to task's run() function
        else:
            ret = tsk.call_run()
    except Exception:
        # was 'except Exception, e' -- that binding was never used and the
        # comma form is a SyntaxError on Python 3; the bare form keeps
        # Python 2.5 compatibility while letting Python 3 parse the module
        tsk.err_msg = Utils.ex_stack()
        tsk.hasrun = EXCEPTION
        # TODO cleanup
        m.error_handler(tsk)
        m.out.put(tsk)
        return
    if ret:
        # non-zero return code from the tool -> task crashed
        tsk.err_code = ret
        tsk.hasrun = CRASHED
    else:
        try:
            tsk.post_run()
        except Utils.WafError:
            pass
        except Exception:
            tsk.err_msg = Utils.ex_stack()
            tsk.hasrun = EXCEPTION
        else:
            tsk.hasrun = SUCCESS
    if tsk.hasrun != SUCCESS:
        m.error_handler(tsk)
    m.out.put(tsk)
class Parallel(object):
"""
keep the consumer threads busy, and avoid consuming cpu cycles
when no more tasks can be added (end of the build, etc)
"""
def __init__(self, bld, j=2):
# number of consumers
self.numjobs = j
self.manager = bld.task_manager
self.manager.current_group = 0
self.total = self.manager.total()
# tasks waiting to be processed - IMPORTANT
self.outstanding = []
self.maxjobs = MAXJOBS
# tasks that are awaiting for another task to complete
self.frozen = []
# tasks returned by the consumers
self.out = Queue(0)
self.count = 0 # tasks not in the producer area
self.processed = 1 # progress indicator
self.stop = False # error condition to stop the build
self.error = False # error flag
def get_next(self):
"override this method to schedule the tasks in a particular order"
if not self.outstanding:
return None
return self.outstanding.pop(0)
def postpone(self, tsk):
"override this method to schedule the tasks in a particular order"
# TODO consider using a deque instead
# random front/back insertion spreads postponed tasks out so one
# unsatisfied dependency does not keep being retried consecutively
if random.randint(0, 1):
self.frozen.insert(0, tsk)
else:
self.frozen.append(tsk)
def refill_task_list(self):
"called to set the next group of tasks"
# first drain completed tasks until we are under the job limits
while self.count > self.numjobs + GAP or self.count >= self.maxjobs:
self.get_out()
while not self.outstanding:
if self.count:
self.get_out()
if self.frozen:
self.outstanding += self.frozen
self.frozen = []
elif not self.count:
# nothing frozen and nothing in flight: advance to the next
# task group from the manager
(jobs, tmp) = self.manager.get_next_set()
if jobs is not None:
self.maxjobs = jobs
if tmp:
self.outstanding += tmp
break
def get_out(self):
"the tasks that are put to execute are all collected using get_out"
ret = self.out.get()
self.manager.add_finished(ret)
if not self.stop and getattr(ret, 'more_tasks', None):
# a task may spawn further tasks once it has run
self.outstanding += ret.more_tasks
self.total += len(ret.more_tasks)
self.count -= 1
def error_handler(self, tsk):
"by default, errors make the build stop (not thread safe so be careful)"
if not Options.options.keep:
self.stop = True
self.error = True
def start(self):
    "execute the tasks until the build finishes or an error stops it"
    while not self.stop:
        self.refill_task_list()
        # consider the next task
        tsk = self.get_next()
        if not tsk:
            if self.count:
                # tasks may add new ones after they are run
                continue
            else:
                # no tasks to run, no tasks running, time to exit
                break
        if tsk.hasrun:
            # if the task is marked as "run", just skip it
            self.processed += 1
            self.manager.add_finished(tsk)
            continue
        try:
            st = tsk.runnable_status()
        except Exception:
            # was 'except Exception, e': the binding was unused and the
            # comma form cannot even be parsed by Python 3
            self.processed += 1
            if self.stop and not Options.options.keep:
                tsk.hasrun = SKIPPED
                self.manager.add_finished(tsk)
                continue
            self.error_handler(tsk)
            self.manager.add_finished(tsk)
            tsk.hasrun = EXCEPTION
            tsk.err_msg = Utils.ex_stack()
            continue
        if st == ASK_LATER:
            self.postpone(tsk)
        elif st == SKIP_ME:
            self.processed += 1
            tsk.hasrun = SKIPPED
            self.manager.add_finished(tsk)
        else:
            # run me: put the task in ready queue
            tsk.position = (self.processed, self.total)
            self.count += 1
            self.processed += 1
            tsk.master = self
            process(tsk)
    # self.count represents the tasks that have been made available to the
    # consumer threads; collect all the tasks after an error else the
    # message may be incomplete
    while self.error and self.count:
        self.get_out()
    assert (self.count == 0 or self.stop)

# enable nothreads: install the inline implementations into waf's Runner
import Runner
Runner.process = process
Runner.Parallel = Parallel
ntdb-1.0/buildtools/wafsamba/pkgconfig.py 0000664 0000000 0000000 00000004453 12241515307 0020571 0 ustar 00root root 0000000 0000000 # handle substitution of variables in pc files
import Build, sys, Logs
from samba_utils import *
def subst_at_vars(task):
    '''substitute @VAR@ style variables in a file.

    Reads the task's single input, replaces each @NAME@ with the value of
    NAME (or NAME.upper()) from task.env, back-substitutes ${prefix} and
    ${exec_prefix} for all but the first occurrence of those values, and
    writes the result to the task's output.  Exits the build on an
    unknown substitution.  Returns 0 (waf task success).'''
    src = task.inputs[0].srcpath(task.env)
    tgt = task.outputs[0].bldpath(task.env)
    f = open(src, 'r')
    s = f.read()
    f.close()
    # split on the @VAR@ markers, keeping them in the result list
    # (raw strings: '\w' is an invalid escape in newer Pythons)
    a = re.split(r'(@\w+@)', s)
    out = []
    done_var = {}
    back_sub = [('PREFIX', '${prefix}'), ('EXEC_PREFIX', '${exec_prefix}')]
    for v in a:
        if re.match(r'@\w+@', v):
            vname = v[1:-1]
            # allow lower-case names in the template to match upper-case env vars
            if not vname in task.env and vname.upper() in task.env:
                vname = vname.upper()
            if not vname in task.env:
                Logs.error("Unknown substitution %s in %s" % (v, task.name))
                sys.exit(1)
            v = SUBST_VARS_RECURSIVE(task.env[vname], task.env)
            # now we back substitute the allowed pc vars
            for (b, m) in back_sub:
                prefix_val = task.env[b]
                if prefix_val == v[0:len(prefix_val)]:
                    if not b in done_var:
                        # we don't want to substitute the first usage
                        done_var[b] = True
                    else:
                        v = m + v[len(prefix_val):]
                    break
        out.append(v)
    contents = ''.join(out)
    f = open(tgt, 'w')
    # the write's return value is not meaningful (was assigned to a dead var)
    f.write(contents)
    f.close()
    return 0
def PKG_CONFIG_FILES(bld, pc_files, vnum=None):
'''install some pkg_config pc files'''
dest = '${PKGCONFIGDIR}'
dest = bld.EXPAND_VARIABLES(dest)
for f in TO_LIST(pc_files):
base=os.path.basename(f)
# each .pc is generated from its .pc.in by subst_at_vars above
t = bld.SAMBA_GENERATOR('PKGCONFIG_%s' % base,
rule=subst_at_vars,
source=f+'.in',
target=f)
# regenerate whenever the installation prefix changes
bld.add_manual_dependency(bld.path.find_or_declare(f), bld.env['PREFIX'])
t.vars = []
if t.env.RPATH_ON_INSTALL:
t.env.LIB_RPATH = t.env.RPATH_ST % t.env.LIBDIR
else:
t.env.LIB_RPATH = ''
if vnum:
t.env.PACKAGE_VERSION = vnum
# changes to any of these values invalidate the generated file
for v in [ 'PREFIX', 'EXEC_PREFIX', 'LIB_RPATH' ]:
t.vars.append(t.env[v])
bld.INSTALL_FILES(dest, f, flat=True, destname=base)
Build.BuildContext.PKG_CONFIG_FILES = PKG_CONFIG_FILES
ntdb-1.0/buildtools/wafsamba/samba3.py 0000664 0000000 0000000 00000012317 12241515307 0017766 0 ustar 00root root 0000000 0000000 # a waf tool to add autoconf-like macros to the configure section
# and for SAMBA_ macros for building libraries, binaries etc
import Options, Build, os
from optparse import SUPPRESS_HELP
from samba_utils import os_path_relpath, TO_LIST
from samba_autoconf import library_flags
def SAMBA3_ADD_OPTION(opt, option, help=(), dest=None, default=True,
                      with_name="with", without_name="without"):
    '''register a --with-<option>/--without-<option> pair of configure
    options sharing one destination (with_<option> by default).'''
    # render the default for the help text
    if default is None:
        default_str = "auto"
    elif default == True:
        default_str = "yes"
    elif default == False:
        default_str = "no"
    else:
        default_str = str(default)

    if help == ():
        help = ("Build with %s support (default=%s)" % (option, default_str))
    if dest is None:
        dest = "with_%s" % option.replace('-', '_')

    with_val = "--%s-%s" % (with_name, option)
    without_val = "--%s-%s" % (without_name, option)

    # FIXME: This is broken and will always default to "default" no matter if
    # --with or --without is chosen.
    opt.add_option(with_val, help=help, action="store_true", dest=dest,
                   default=default)
    opt.add_option(without_val, help=SUPPRESS_HELP, action="store_false",
                   dest=dest)
Options.Handler.SAMBA3_ADD_OPTION = SAMBA3_ADD_OPTION
def SAMBA3_IS_STATIC_MODULE(bld, module):
    '''Check whether module is in the configured static-modules list'''
    # 'in' already yields a bool; no need for the if/return-True dance
    return module in bld.env['static_modules']
Build.BuildContext.SAMBA3_IS_STATIC_MODULE = SAMBA3_IS_STATIC_MODULE

def SAMBA3_IS_SHARED_MODULE(bld, module):
    '''Check whether module is in the configured shared-modules list'''
    return module in bld.env['shared_modules']
Build.BuildContext.SAMBA3_IS_SHARED_MODULE = SAMBA3_IS_SHARED_MODULE

def SAMBA3_IS_ENABLED_MODULE(bld, module):
    '''Check whether module is in either the shared or the static list'''
    return SAMBA3_IS_STATIC_MODULE(bld, module) or SAMBA3_IS_SHARED_MODULE(bld, module)
Build.BuildContext.SAMBA3_IS_ENABLED_MODULE = SAMBA3_IS_ENABLED_MODULE
def s3_fix_kwargs(bld, kwargs):
'''fix the build arguments for s3 build rules to include the
necessary includes, subdir and cflags options '''
s3dir = os.path.join(bld.env.srcdir, 'source3')
s3reldir = os_path_relpath(s3dir, bld.curdir)
# the extra_includes list is relative to the source3 directory
extra_includes = [ '.', 'include', 'lib', '../lib/tdb_compat' ]
# local heimdal paths only included when USING_SYSTEM_KRB5 is not set
if not bld.CONFIG_SET("USING_SYSTEM_KRB5"):
extra_includes += [ '../source4/heimdal/lib/com_err',
'../source4/heimdal/lib/krb5',
'../source4/heimdal/lib/gssapi',
'../source4/heimdal_build',
'../bin/default/source4/heimdal/lib/asn1' ]
# for each bundled library: use the system copy's pkg-config include
# paths when configured, otherwise the in-tree copy
if bld.CONFIG_SET('USING_SYSTEM_TDB'):
(tdb_includes, tdb_ldflags, tdb_cpppath) = library_flags(bld, 'tdb')
extra_includes += tdb_cpppath
else:
extra_includes += [ '../lib/tdb/include' ]
if bld.CONFIG_SET('USING_SYSTEM_TEVENT'):
(tevent_includes, tevent_ldflags, tevent_cpppath) = library_flags(bld, 'tevent')
extra_includes += tevent_cpppath
else:
extra_includes += [ '../lib/tevent' ]
if bld.CONFIG_SET('USING_SYSTEM_TALLOC'):
(talloc_includes, talloc_ldflags, talloc_cpppath) = library_flags(bld, 'talloc')
extra_includes += talloc_cpppath
else:
extra_includes += [ '../lib/talloc' ]
if bld.CONFIG_SET('USING_SYSTEM_POPT'):
(popt_includes, popt_ldflags, popt_cpppath) = library_flags(bld, 'popt')
extra_includes += popt_cpppath
else:
extra_includes += [ '../lib/popt' ]
if bld.CONFIG_SET('USING_SYSTEM_INIPARSER'):
(iniparser_includes, iniparser_ldflags, iniparser_cpppath) = library_flags(bld, 'iniparser')
extra_includes += iniparser_cpppath
else:
extra_includes += [ '../lib/iniparser' ]
# s3 builds assume that they will have a bunch of extra include paths
includes = []
for d in extra_includes:
includes += [ os.path.join(s3reldir, d) ]
# the rule may already have some includes listed
if 'includes' in kwargs:
includes += TO_LIST(kwargs['includes'])
kwargs['includes'] = includes
# these wrappers allow for mixing of S3 and S4 build rules in the one build
# -- each one patches the kwargs via s3_fix_kwargs then delegates to the
# corresponding SAMBA_* build rule
def SAMBA3_LIBRARY(bld, name, *args, **kwargs):
s3_fix_kwargs(bld, kwargs)
return bld.SAMBA_LIBRARY(name, *args, **kwargs)
Build.BuildContext.SAMBA3_LIBRARY = SAMBA3_LIBRARY
def SAMBA3_MODULE(bld, name, *args, **kwargs):
s3_fix_kwargs(bld, kwargs)
return bld.SAMBA_MODULE(name, *args, **kwargs)
Build.BuildContext.SAMBA3_MODULE = SAMBA3_MODULE
def SAMBA3_SUBSYSTEM(bld, name, *args, **kwargs):
s3_fix_kwargs(bld, kwargs)
return bld.SAMBA_SUBSYSTEM(name, *args, **kwargs)
Build.BuildContext.SAMBA3_SUBSYSTEM = SAMBA3_SUBSYSTEM
def SAMBA3_BINARY(bld, name, *args, **kwargs):
s3_fix_kwargs(bld, kwargs)
return bld.SAMBA_BINARY(name, *args, **kwargs)
Build.BuildContext.SAMBA3_BINARY = SAMBA3_BINARY
def SAMBA3_PYTHON(bld, name, *args, **kwargs):
s3_fix_kwargs(bld, kwargs)
return bld.SAMBA_PYTHON(name, *args, **kwargs)
Build.BuildContext.SAMBA3_PYTHON = SAMBA3_PYTHON
ntdb-1.0/buildtools/wafsamba/samba_abi.py 0000664 0000000 0000000 00000021140 12241515307 0020510 0 ustar 00root root 0000000 0000000 # functions for handling ABI checking of libraries
import Options, Utils, os, Logs, samba_utils, sys, Task, fnmatch, re, Build
from TaskGen import feature, before, after
# these type maps cope with platform specific names for common types
# please add new type mappings into the list below
abi_type_maps = {
    '_Bool' : 'bool',
    'struct __va_list_tag *' : 'va_list'
    }

# turn "1.2.10" into [1, 2, 10] so version strings sort numerically
# (list() so the result is also comparable/sortable on Python 3)
version_key = lambda x: list(map(int, x.split(".")))

def normalise_signature(sig):
    '''normalise a signature from gdb'''
    sig = sig.strip()
    # strip the '$N = {...}' wrappers gdb prints around evaluated symbols
    sig = re.sub(r'^\$[0-9]+\s=\s\{(.+)\}$', r'\1', sig)
    sig = re.sub(r'^\$[0-9]+\s=\s\{(.+)\}(\s0x[0-9a-f]+\s<\w+>)+$', r'\1', sig)
    sig = re.sub(r'^\$[0-9]+\s=\s(0x[0-9a-f]+)\s?(<\w+>)?$', r'\1', sig)
    # concrete addresses are never ABI-relevant
    sig = re.sub('0x[0-9a-f]+', '0xXXXX', sig)
    # collapse gdb's '"x", <repeats N times>' array notation; the previous
    # pattern had lost its '<repeats ...>' tail and used backreference \1
    # with no capture group, which raises 'invalid group reference'
    sig = re.sub('", <repeats [0-9]+ times>', '"', sig)
    for t in abi_type_maps:
        # we need to cope with non-word characters in mapped types
        m = t
        m = m.replace('*', r'\*')
        if m[-1].isalnum() or m[-1] == '_':
            m += '\\b'
        if m[0].isalnum() or m[0] == '_':
            m = '\\b' + m
        sig = re.sub(m, abi_type_maps[t], sig)
    return sig

def normalise_varargs(sig):
    '''cope with older versions of gdb that print ", ..." in signatures'''
    sig = re.sub(r',\s\.\.\.', '', sig)
    return sig
def parse_sigs(sigs, abi_match):
    '''parse an ABI signatures blob into a {symbol: normalised_sig} dict,
    honouring the abi_match include/exclude ('!'-prefixed) patterns'''
    abi_match = samba_utils.TO_LIST(abi_match)
    ret = {}
    for line in sigs.split('\n'):
        if ':' not in line:
            continue
        parts = line.split(':')
        if abi_match:
            matched = False
            negative = False
            for pattern in abi_match:
                if pattern[0] == '!' and fnmatch.fnmatch(parts[0], pattern[1:]):
                    negative = True
                    break
                elif fnmatch.fnmatch(parts[0], pattern):
                    matched = True
                    break
            # drop symbols excluded by a '!' pattern that matched nothing else
            if (not matched) and negative:
                continue
        Logs.debug("%s -> %s" % (parts[1], normalise_signature(parts[1])))
        ret[parts[0]] = normalise_signature(parts[1])
    return ret

def save_sigs(sig_file, parsed_sigs):
    '''save ABI signatures to a file, one sorted "name: sig" line each'''
    lines = []
    for name in sorted(parsed_sigs.keys()):
        lines.append('%s: %s\n' % (name, parsed_sigs[name]))
    return samba_utils.save_file(sig_file, ''.join(lines), create_dir=True)
def abi_check_task(self):
'''check if the ABI has changed'''
abi_gen = self.ABI_GEN
libpath = self.inputs[0].abspath(self.env)
libname = os.path.basename(libpath)
# ABI_GEN is buildtools/scripts/abi_gen.sh; it dumps symbol signatures
sigs = Utils.cmd_output([abi_gen, libpath])
parsed_sigs = parse_sigs(sigs, self.ABI_MATCH)
sig_file = self.ABI_FILE
old_sigs = samba_utils.load_file(sig_file)
# no stored signatures yet, or --abi-update given: (re)write them
if old_sigs is None or Options.options.ABI_UPDATE:
if not save_sigs(sig_file, parsed_sigs):
raise Utils.WafError('Failed to save ABI file "%s"' % sig_file)
Logs.warn('Generated ABI signatures %s' % sig_file)
return
parsed_old_sigs = parse_sigs(old_sigs, self.ABI_MATCH)
# check all old sigs
got_error = False
for s in parsed_old_sigs:
if not s in parsed_sigs:
Logs.error('%s: symbol %s has been removed - please update major version\n\tsignature: %s' % (
libname, s, parsed_old_sigs[s]))
got_error = True
elif normalise_varargs(parsed_old_sigs[s]) != normalise_varargs(parsed_sigs[s]):
Logs.error('%s: symbol %s has changed - please update major version\n\told_signature: %s\n\tnew_signature: %s' % (
libname, s, parsed_old_sigs[s], parsed_sigs[s]))
got_error = True
# new symbols only require a minor version bump
for s in parsed_sigs:
if not s in parsed_old_sigs:
Logs.error('%s: symbol %s has been added - please mark it _PRIVATE_ or update minor version\n\tsignature: %s' % (
libname, s, parsed_sigs[s]))
got_error = True
if got_error:
raise Utils.WafError('ABI for %s has changed - please fix library version then build with --abi-update\nSee http://wiki.samba.org/index.php/Waf#ABI_Checking for more information\nIf you have not changed any ABI, and your platform always gives this error, please configure with --abi-check-disable to skip this check' % libname)
t = Task.task_type_from_func('abi_check', abi_check_task, color='BLUE', ext_in='.bin')
t.quiet = True
# allow "waf --abi-check" to force re-checking the ABI
if '--abi-check' in sys.argv:
Task.always_run(t)
@after('apply_link')
@feature('abi_check')
def abi_check(self):
'''check that ABI matches saved signatures'''
env = self.bld.env
# nothing to do unless ABI checking is enabled and the target declares
# an abi_directory to hold its .sigs files
if not env.ABI_CHECK or self.abi_directory is None:
return
# if the platform doesn't support -fvisibility=hidden then the ABI
# checks become fairly meaningless
if not env.HAVE_VISIBILITY_ATTR:
return
topsrc = self.bld.srcnode.abspath()
abi_gen = os.path.join(topsrc, 'buildtools/scripts/abi_gen.sh')
abi_file = "%s/%s-%s.sigs" % (self.abi_directory, self.name, self.vnum)
# run abi_check_task on the freshly linked library
tsk = self.create_task('abi_check', self.link_task.outputs[0])
tsk.ABI_FILE = abi_file
tsk.ABI_MATCH = self.abi_match
tsk.ABI_GEN = abi_gen
def abi_process_file(fname, version, symmap):
    '''process one ABI file, adding new symbols to the symmap.

    Each line is "symbol: signature"; the first version a symbol is seen
    in wins, so callers must feed files in ascending version order.'''
    f = open(fname, mode='r')
    try:
        for line in f:
            symname = line.split(":")[0]
            if symname not in symmap:
                symmap[symname] = version
    finally:
        # guarantee the handle is released even if a read fails
        f.close()
def abi_write_vscript(f, libname, current_version, versions, symmap, abi_match):
    """Write a vscript file for a library in --version-script format.

    :param f: File-like object to write to
    :param libname: Name of the library, uppercased
    :param current_version: Current version
    :param versions: Versions to consider
    :param symmap: Dictionary mapping symbols -> version
    :param abi_match: List of symbols considered to be public in the current
        version
    """
    # invert symbol -> version into version -> [symbols]
    invmap = {}
    for s in symmap:
        invmap.setdefault(symmap[s], []).append(s)
    last_key = ""
    versions = sorted(versions, key=version_key)
    # emit one node per historical version, chained via the trailing
    # '} PREV_VERSION;' inheritance syntax
    for k in versions:
        symver = "%s_%s" % (libname, k)
        if symver == current_version:
            break
        f.write("%s {\n" % symver)
        if k in sorted(invmap.keys()):
            f.write("\tglobal:\n")
            for s in invmap.get(k, []):
                f.write("\t\t%s;\n" % s)
        f.write("}%s;\n\n" % last_key)
        last_key = " %s" % symver
    f.write("%s {\n" % current_version)
    # use list comprehensions rather than filter(): on Python 3 filter()
    # returns an iterator, which would break the len() checks below
    local_abi = [x for x in abi_match if x[0] == '!']
    global_abi = [x for x in abi_match if x[0] != '!']
    f.write("\tglobal:\n")
    if len(global_abi) > 0:
        for x in global_abi:
            f.write("\t\t%s;\n" % x)
    else:
        f.write("\t\t*;\n")
    if abi_match != ["*"]:
        f.write("\tlocal:\n")
        for x in local_abi:
            f.write("\t\t%s;\n" % x[1:])
        if len(global_abi) > 0:
            f.write("\t\t*;\n")
    f.write("};\n")
def abi_build_vscript(task):
'''generate a vscript file for our public libraries'''
tgt = task.outputs[0].bldpath(task.env)
symmap = {}
versions = []
for f in task.inputs:
fname = f.abspath(task.env)
basename = os.path.basename(fname)
# sig files are named <LIBNAME>-<version>.sigs; slice out the version
version = basename[len(task.env.LIBNAME)+1:-len(".sigs")]
versions.append(version)
abi_process_file(fname, version, symmap)
f = open(tgt, mode='w')
try:
abi_write_vscript(f, task.env.LIBNAME, task.env.VERSION, versions,
symmap, task.env.ABI_MATCH)
finally:
f.close()
def ABI_VSCRIPT(bld, libname, abi_directory, version, vscript, abi_match=None):
'''generate a vscript file for our public libraries'''
if abi_directory:
source = bld.path.ant_glob('%s/%s-[0-9]*.sigs' % (abi_directory, libname))
# process the .sigs files in ascending version order so symbols are
# attributed to the version they first appeared in
def abi_file_key(path):
return version_key(path[:-len(".sigs")].rsplit("-")[-1])
source = sorted(source.split(), key=abi_file_key)
else:
source = ''
libname = os.path.basename(libname)
version = os.path.basename(version)
# sanitise names into valid version-script identifiers
libname = libname.replace("-", "_").replace("+","_").upper()
version = version.replace("-", "_").replace("+","_").upper()
t = bld.SAMBA_GENERATOR(vscript,
rule=abi_build_vscript,
source=source,
group='vscripts',
target=vscript)
if abi_match is None:
abi_match = ["*"]
else:
abi_match = samba_utils.TO_LIST(abi_match)
t.env.ABI_MATCH = abi_match
t.env.VERSION = version
t.env.LIBNAME = libname
t.vars = ['LIBNAME', 'VERSION', 'ABI_MATCH']
Build.BuildContext.ABI_VSCRIPT = ABI_VSCRIPT
ntdb-1.0/buildtools/wafsamba/samba_autoconf.py 0000664 0000000 0000000 00000060675 12241515307 0021613 0 ustar 00root root 0000000 0000000 # a waf tool to add autoconf-like macros to the configure section
import Build, os, sys, Options, preproc, Logs
import string
from Configure import conf
from samba_utils import *
import samba_cross
missing_headers = set()
####################################################
# some autoconf like helpers, to make the transition
# to waf a bit easier for those used to autoconf
# m4 files
@runonce
@conf
def DEFINE(conf, d, v, add_to_cflags=False, quote=False):
'''define a config option'''
conf.define(d, v, quote=quote)
if add_to_cflags:
# also pass the define on every compiler command line
conf.env.append_value('CCDEFINES', d + '=' + str(v))
def hlist_to_string(conf, headers=None):
'''convert a headers list to a set of #include lines'''
hdrs=''
hlist = conf.env.hlist
if headers:
# copy so the extra headers don't leak into the global hlist
hlist = hlist[:]
hlist.extend(TO_LIST(headers))
for h in hlist:
hdrs += '#include <%s>\n' % h
return hdrs
@conf
def COMPOUND_START(conf, msg):
'''start a compound test: print one message for a group of sub-checks
and silence the per-check messages until COMPOUND_END'''
def null_check_message_1(self,*k,**kw):
return
def null_check_message_2(self,*k,**kw):
return
# in_compound acts as a nesting counter for nested compound tests
v = getattr(conf.env, 'in_compound', [])
if v != [] and v != 0:
conf.env.in_compound = v + 1
return
conf.check_message_1(msg)
conf.saved_check_message_1 = conf.check_message_1
conf.check_message_1 = null_check_message_1
conf.saved_check_message_2 = conf.check_message_2
conf.check_message_2 = null_check_message_2
conf.env.in_compound = 1
@conf
def COMPOUND_END(conf, result):
'''end a compound test and print its overall result'''
conf.env.in_compound -= 1
if conf.env.in_compound != 0:
return
# restore the real message printers saved by COMPOUND_START
conf.check_message_1 = conf.saved_check_message_1
conf.check_message_2 = conf.saved_check_message_2
p = conf.check_message_2
if result is True:
p('ok')
elif not result:
p('not found', 'YELLOW')
else:
p(result)
@feature('nolink')
def nolink(self):
'''using the nolink type in conf.check() allows us to avoid
the link stage of a test, thus speeding it up for tests
that where linking is not needed'''
pass
def CHECK_HEADER(conf, h, add_headers=False, lib=None):
'''check for a header'''
# cached negative result; a lib may add include paths, so only trust
# the cache when no lib is involved
if h in missing_headers and lib is None:
return False
# derive HAVE_FOO_H style define name from the header path
d = h.upper().replace('/', '_')
d = d.replace('.', '_')
d = d.replace('-', '_')
d = 'HAVE_%s' % d
if CONFIG_SET(conf, d):
if add_headers:
if not h in conf.env.hlist:
conf.env.hlist.append(h)
return True
(ccflags, ldflags, cpppath) = library_flags(conf, lib)
hdrs = hlist_to_string(conf, headers=h)
if lib is None:
lib = ""
# compile-only probe (type='nolink') including all accumulated headers
ret = conf.check(fragment='%s\nint main(void) { return 0; }' % hdrs,
type='nolink',
execute=0,
ccflags=ccflags,
includes=cpppath,
uselib=lib.upper(),
msg="Checking for header %s" % h)
if not ret:
missing_headers.add(h)
return False
conf.DEFINE(d, 1)
if add_headers and not h in conf.env.hlist:
conf.env.hlist.append(h)
return ret
@conf
def CHECK_HEADERS(conf, headers, add_headers=False, together=False, lib=None):
'''check for a list of headers
when together==True, then the headers accumulate within this test.
This is useful for interdependent headers
'''
ret = True
if not add_headers and together:
# accumulate during the loop but restore the global list afterwards
saved_hlist = conf.env.hlist[:]
set_add_headers = True
else:
set_add_headers = add_headers
for hdr in TO_LIST(headers):
if not CHECK_HEADER(conf, hdr, set_add_headers, lib=lib):
ret = False
if not add_headers and together:
conf.env.hlist = saved_hlist
return ret
def header_list(conf, headers=None, lib=None):
'''form a list of headers which exist, as a string'''
hlist=[]
if headers is not None:
for h in TO_LIST(headers):
if CHECK_HEADER(conf, h, add_headers=False, lib=lib):
hlist.append(h)
return hlist_to_string(conf, headers=hlist)
@conf
def CHECK_TYPE(conf, t, alternate=None, headers=None, define=None, lib=None, msg=None):
'''check for a single type'''
if define is None:
define = 'HAVE_' + t.upper().replace(' ', '_')
if msg is None:
msg='Checking for %s' % t
# a simple declaration is enough; compile-only, no link
ret = CHECK_CODE(conf, '%s _x' % t,
define,
execute=False,
headers=headers,
local_include=False,
msg=msg,
lib=lib,
link=False)
if not ret and alternate:
# map the missing type name onto the given substitute type
conf.DEFINE(t, alternate)
return ret
@conf
def CHECK_TYPES(conf, list, headers=None, define=None, alternate=None, lib=None):
'''check for a list of types'''
ret = True
for t in TO_LIST(list):
if not CHECK_TYPE(conf, t, headers=headers,
define=define, alternate=alternate, lib=lib):
ret = False
return ret
@conf
def CHECK_TYPE_IN(conf, t, headers=None, alternate=None, define=None):
'''check for a single type with a header'''
return CHECK_TYPE(conf, t, headers=headers, alternate=alternate, define=define)
@conf
def CHECK_VARIABLE(conf, v, define=None, always=False,
headers=None, msg=None, lib=None):
'''check for a variable declaration (or define)'''
if define is None:
define = 'HAVE_%s' % v.upper()
if msg is None:
msg="Checking for variable %s" % v
return CHECK_CODE(conf,
# we need to make sure the compiler doesn't
# optimize it out...
'''
#ifndef %s
void *_x; _x=(void *)&%s; return (int)_x;
#endif
return 0
''' % (v, v),
execute=False,
link=False,
msg=msg,
local_include=False,
lib=lib,
headers=headers,
define=define,
always=always)
@conf
def CHECK_DECLS(conf, vars, reverse=False, headers=None, always=False):
'''check a list of variable declarations, using the HAVE_DECL_xxx form
of define
When reverse==True then use HAVE_xxx_DECL instead of HAVE_DECL_xxx
'''
ret = True
for v in TO_LIST(vars):
if not reverse:
define='HAVE_DECL_%s' % v.upper()
else:
define='HAVE_%s_DECL' % v.upper()
if not CHECK_VARIABLE(conf, v,
define=define,
headers=headers,
msg='Checking for declaration of %s' % v,
always=always):
ret = False
return ret
def CHECK_FUNC(conf, f, link=True, lib=None, headers=None):
    '''check for a function, defining HAVE_<F> when found.

    Tries three strategies in turn: a link test modelled on autoconf's
    AC_CHECK_FUNC, a macro test, and (when link is falsy) a plain
    declaration test.'''
    define = 'HAVE_%s' % f.upper()
    ret = False
    conf.COMPOUND_START('Checking for %s' % f)
    if link is None or link:
        ret = CHECK_CODE(conf,
                         # this is based on the autoconf strategy: shadow the
                         # name with a #define while pulling in a system
                         # header, then link against a dummy declaration and
                         # reject glibc's __stub_* markers.  The <limits.h>
                         # and <assert.h> arguments had been lost from the
                         # '# include' lines, leaving an uncompilable probe.
                         '''
#define %s __fake__%s
#ifdef HAVE_LIMITS_H
# include <limits.h>
#else
# include <assert.h>
#endif
#undef %s
#if defined __stub_%s || defined __stub___%s
#error "bad glibc stub"
#endif
extern char %s();
int main() { return %s(); }
''' % (f, f, f, f, f, f, f),
                         execute=False,
                         link=True,
                         addmain=False,
                         add_headers=False,
                         define=define,
                         local_include=False,
                         lib=lib,
                         headers=headers,
                         msg='Checking for %s' % f)
    if not ret:
        ret = CHECK_CODE(conf,
                         # it might be a macro
                         # we need to make sure the compiler doesn't
                         # optimize it out...
                         'void *__x = (void *)%s; return (int)__x' % f,
                         execute=False,
                         link=True,
                         addmain=True,
                         add_headers=True,
                         define=define,
                         local_include=False,
                         lib=lib,
                         headers=headers,
                         msg='Checking for macro %s' % f)
    if not ret and (link is None or not link):
        # fall back to a declaration-only check when linking is not wanted
        ret = CHECK_VARIABLE(conf, f,
                             define=define,
                             headers=headers,
                             msg='Checking for declaration of %s' % f)
    conf.COMPOUND_END(ret)
    return ret

@conf
def CHECK_FUNCS(conf, list, link=True, lib=None, headers=None):
    '''check for a list of functions; returns False if any are missing'''
    ret = True
    for f in TO_LIST(list):
        if not CHECK_FUNC(conf, f, link=link, lib=lib, headers=headers):
            ret = False
    return ret
@conf
def CHECK_SIZEOF(conf, vars, headers=None, define=None):
'''check the size of a type'''
ret = True
for v in TO_LIST(vars):
v_define = define
if v_define is None:
v_define = 'SIZEOF_%s' % v.upper().replace(' ', '_')
# run a tiny program that prints sizeof(v); define_ret stores the
# printed value as the define's value
if not CHECK_CODE(conf,
'printf("%%u", (unsigned)sizeof(%s))' % v,
define=v_define,
execute=True,
define_ret=True,
quote=False,
headers=headers,
local_include=False,
msg="Checking size of %s" % v):
ret = False
return ret
@conf
def CHECK_VALUEOF(conf, v, headers=None, define=None):
'''check the value of a variable/define; returns it as int, or None'''
ret = True
v_define = define
if v_define is None:
v_define = 'VALUEOF_%s' % v.upper().replace(' ', '_')
if CHECK_CODE(conf,
'printf("%%u", (unsigned)(%s))' % v,
define=v_define,
execute=True,
define_ret=True,
quote=False,
headers=headers,
local_include=False,
msg="Checking value of %s" % v):
return int(conf.env[v_define])
return None
@conf
def CHECK_CODE(conf, code, define,
always=False, execute=False, addmain=True,
add_headers=True, mandatory=False,
headers=None, msg=None, cflags='', includes='# .',
local_include=True, lib=None, link=True,
define_ret=False, quote=False,
on_target=True):
'''check if some code compiles and/or runs'''
# cached: a previous run already set this define
if CONFIG_SET(conf, define):
return True
if headers is not None:
CHECK_HEADERS(conf, headers=headers, lib=lib)
if add_headers:
hdrs = header_list(conf, headers=headers, lib=lib)
else:
hdrs = ''
# conf.check expects an int, not a bool, for execute
if execute:
execute = 1
else:
execute = 0
defs = conf.get_config_header()
# build the test program: config defines + headers + the code,
# optionally wrapped in a main()
if addmain:
fragment='%s\n%s\n int main(void) { %s; return 0; }\n' % (defs, hdrs, code)
else:
fragment='%s\n%s\n%s\n' % (defs, hdrs, code)
if msg is None:
msg="Checking for %s" % define
cflags = TO_LIST(cflags)
if local_include:
cflags.append('-I%s' % conf.curdir)
if not link:
type='nolink'
else:
type='cprogram'
uselib = TO_LIST(lib)
# pull in pkg-config derived flags for any requested libraries
(ccflags, ldflags, cpppath) = library_flags(conf, uselib)
includes = TO_LIST(includes)
includes.extend(cpppath)
uselib = [l.upper() for l in uselib]
cflags.extend(ccflags)
# when cross compiling, run the test binary via the cross-exec helper
if on_target:
exec_args = conf.SAMBA_CROSS_ARGS(msg=msg)
else:
exec_args = []
conf.COMPOUND_START(msg)
ret = conf.check(fragment=fragment,
execute=execute,
define_name = define,
mandatory = mandatory,
ccflags=cflags,
ldflags=ldflags,
includes=includes,
uselib=uselib,
type=type,
msg=msg,
quote=quote,
exec_args=exec_args,
define_ret=define_ret)
if not ret and CONFIG_SET(conf, define):
# sometimes conf.check() returns false, but it
# sets the define. Maybe a waf bug?
ret = True
if ret:
if not define_ret:
conf.DEFINE(define, 1)
conf.COMPOUND_END(True)
else:
# define_ret: the define holds the program's output, report it
conf.COMPOUND_END(conf.env[define])
return True
if always:
conf.DEFINE(define, 0)
conf.COMPOUND_END(False)
return False
@conf
def CHECK_STRUCTURE_MEMBER(conf, structname, member,
                           always=False, define=None, headers=None):
    '''check for a structure member

    Compiles a snippet that takes the address of the member; if the
    member does not exist the compilation fails and the define stays
    unset.
    '''
    if define is None:
        define = 'HAVE_%s' % member.upper()
    snippet = '%s s; void *_x; _x=(void *)&s.%s' % (structname, member)
    return CHECK_CODE(conf,
                      snippet,
                      define,
                      execute=False,
                      link=False,
                      always=always,
                      headers=headers,
                      local_include=False,
                      msg="Checking for member %s in %s" % (member, structname))
@conf
def CHECK_CFLAGS(conf, cflags, fragment='int main(void) { return 0; }\n'):
    '''check whether the compiler accepts the given cflags'''
    # compile-only (type 'nolink') so we exercise the compiler, not the linker
    check_args = dict(fragment=fragment,
                      execute=0,
                      type='nolink',
                      ccflags=cflags,
                      msg="Checking compiler accepts %s" % cflags)
    return conf.check(**check_args)
@conf
def CHECK_LDFLAGS(conf, ldflags):
    '''check whether the linker accepts the given ldflags'''
    # link a trivial program with the candidate flags
    trivial_main = 'int main(void) { return 0; }\n'
    return conf.check(fragment=trivial_main,
                      execute=0,
                      ldflags=ldflags,
                      msg="Checking linker accepts %s" % ldflags)
@conf
def CONFIG_GET(conf, option):
    '''return the value of a configuration option, or None when unset

    The previous docstring ("return True if a configuration option was
    found") was copy-pasted from CONFIG_SET and described the wrong
    contract; this function returns the stored value itself.
    '''
    if (option in conf.env):
        return conf.env[option]
    else:
        return None
@conf
def CONFIG_SET(conf, option):
    '''return True if a configuration option was found and is non-empty

    None and empty containers ([] or ()) count as "not set".
    '''
    if option not in conf.env:
        return False
    return conf.env[option] not in (None, [], ())
Build.BuildContext.CONFIG_SET = CONFIG_SET
Build.BuildContext.CONFIG_GET = CONFIG_GET
def library_flags(self, libs):
    '''work out flags from pkg_config

    Collects the per-library CCFLAGS_/LDFLAGS_/CPPPATH_ values that
    earlier pkg-config checks stored in the environment, plus any
    EXTRA_LDFLAGS, and returns (ccflags, ldflags, cpppath) with
    duplicates removed.
    '''
    ccflags = []
    ldflags = []
    cpppath = []
    for lib in TO_LIST(libs):
        # note that we do not add the -I and -L in here, as that is added by the waf
        # core. Adding it here would just change the order that it is put on the link line
        # which can cause system paths to be added before internal libraries
        extra_ccflags = TO_LIST(getattr(self.env, 'CCFLAGS_%s' % lib.upper(), []))
        extra_ldflags = TO_LIST(getattr(self.env, 'LDFLAGS_%s' % lib.upper(), []))
        extra_cpppath = TO_LIST(getattr(self.env, 'CPPPATH_%s' % lib.upper(), []))
        ccflags.extend(extra_ccflags)
        ldflags.extend(extra_ldflags)
        cpppath.extend(extra_cpppath)
    if 'EXTRA_LDFLAGS' in self.env:
        ldflags.extend(self.env['EXTRA_LDFLAGS'])
    # de-duplicate while preserving first-seen order
    ccflags = unique_list(ccflags)
    ldflags = unique_list(ldflags)
    cpppath = unique_list(cpppath)
    return (ccflags, ldflags, cpppath)
@conf
def CHECK_LIB(conf, libs, mandatory=False, empty_decl=True, set_target=True, shlib=False):
    '''check if a set of libraries exist as system libraries

    returns the sublist of libs that do exist as a syslib or []
    shlib=True links the test fragment as a shared library instead of
    a program.
    '''
    fragment= '''
int foo()
{
    int v = 2;
    return v*2;
}
'''
    ret = []
    liblist = TO_LIST(libs)
    for lib in liblist[:]:
        # already known to be a system library from a previous check
        if GET_TARGET_TYPE(conf, lib) == 'SYSLIB':
            ret.append(lib)
            continue
        (ccflags, ldflags, cpppath) = library_flags(conf, lib)
        if shlib:
            res = conf.check(features='cc cshlib', fragment=fragment, lib=lib, uselib_store=lib, ccflags=ccflags, ldflags=ldflags, uselib=lib.upper())
        else:
            res = conf.check(lib=lib, uselib_store=lib, ccflags=ccflags, ldflags=ldflags, uselib=lib.upper())
        if not res:
            if mandatory:
                # bug fix: the message previously interpolated the
                # builtin 'list' type (no such local variable exists
                # here), printing garbage like "<type 'list'>"
                Logs.error("Mandatory library '%s' not found" % lib)
                sys.exit(1)
            if empty_decl:
                # if it isn't a mandatory library, then remove it from dependency lists
                if set_target:
                    SET_TARGET_TYPE(conf, lib, 'EMPTY')
        else:
            conf.define('HAVE_LIB%s' % lib.upper().replace('-','_'), 1)
            conf.env['LIB_' + lib.upper()] = lib
            if set_target:
                conf.SET_TARGET_TYPE(lib, 'SYSLIB')
            ret.append(lib)
    return ret
@conf
def CHECK_FUNCS_IN(conf, list, library, mandatory=False, checklibc=False,
                   headers=None, link=True, empty_decl=True, set_target=True):
    """
    check that the functions in 'list' are available in 'library'
    if they are, then make that library available as a dependency
    if the library is not available and mandatory==True, then
    raise an error.
    If the library is not available and mandatory==False, then
    add the library to the list of dependencies to remove from
    build rules
    optionally check for the functions first in libc
    """
    remaining = TO_LIST(list)
    liblist = TO_LIST(library)
    # check if some already found
    for f in remaining[:]:
        if CONFIG_SET(conf, 'HAVE_%s' % f.upper()):
            remaining.remove(f)
    # see if the functions are in libc
    if checklibc:
        for f in remaining[:]:
            if CHECK_FUNC(conf, f, link=True, headers=headers):
                remaining.remove(f)
    if remaining == []:
        # everything already found: the libraries are not needed as deps
        for lib in liblist:
            if GET_TARGET_TYPE(conf, lib) != 'SYSLIB' and empty_decl:
                SET_TARGET_TYPE(conf, lib, 'EMPTY')
        return True
    checklist = conf.CHECK_LIB(liblist, empty_decl=empty_decl, set_target=set_target)
    for lib in liblist[:]:
        if not lib in checklist and mandatory:
            Logs.error("Mandatory library '%s' not found for functions '%s'" % (lib, list))
            sys.exit(1)
    # check the remaining functions against the libraries that were found
    ret = True
    for f in remaining:
        if not CHECK_FUNC(conf, f, lib=' '.join(checklist), headers=headers, link=link):
            ret = False
    return ret
@conf
def IN_LAUNCH_DIR(conf):
    '''return True if this rule is being run from the launch directory'''
    # compare canonical paths so symlinks do not cause false negatives
    here = os.path.realpath(conf.curdir)
    launch = os.path.realpath(Options.launch_dir)
    return here == launch
Options.Handler.IN_LAUNCH_DIR = IN_LAUNCH_DIR
@conf
def SAMBA_CONFIG_H(conf, path=None):
    '''write out config.h in the right directory

    Also applies the debug/developer/picky-developer compiler flag
    sets selected on the configure command line before the header is
    written.
    '''
    # we don't want to produce a config.h in places like lib/replace
    # when we are building projects that depend on lib/replace
    if not IN_LAUNCH_DIR(conf):
        return
    if Options.options.debug:
        conf.ADD_CFLAGS('-g',
                        testflags=True)
    if Options.options.developer:
        # we add these here to ensure that -Wstrict-prototypes is not set during configure
        conf.ADD_CFLAGS('-Wall -g -Wshadow -Werror=strict-prototypes -Wstrict-prototypes -Werror=pointer-arith -Wpointer-arith -Wcast-align -Werror=write-strings -Wwrite-strings -Werror-implicit-function-declaration -Wformat=2 -Wno-format-y2k -Wmissing-prototypes -fno-common -Werror=address',
                        testflags=True)
        conf.ADD_CFLAGS('-Wcast-qual', testflags=True)
        conf.env.DEVELOPER_MODE = True
        # This check is because for ldb_search(), a NULL format string
        # is not an error, but some compilers complain about that.
        if CHECK_CFLAGS(conf, ["-Werror=format", "-Wformat=2"], '''
int testformat(char *format, ...) __attribute__ ((format (__printf__, 1, 2)));

int main(void) {
        testformat(0);
        return 0;
}

'''):
            if not 'EXTRA_CFLAGS' in conf.env:
                conf.env['EXTRA_CFLAGS'] = []
            conf.env['EXTRA_CFLAGS'].extend(TO_LIST("-Werror=format"))
    if Options.options.picky_developer:
        conf.ADD_CFLAGS('-Werror', testflags=True)
    if Options.options.fatal_errors:
        conf.ADD_CFLAGS('-Wfatal-errors', testflags=True)
    if Options.options.pedantic:
        conf.ADD_CFLAGS('-W', testflags=True)
    if path is None:
        conf.write_config_header('config.h', top=True)
    else:
        conf.write_config_header(path)
    # fail configure if cross-answers were needed but missing
    conf.SAMBA_CROSS_CHECK_COMPLETE()
@conf
def CONFIG_PATH(conf, name, default):
    '''setup a configurable path

    If 'name' is not already set in the environment it is set to
    'default' when that is an absolute path, otherwise to
    PREFIX + default.
    '''
    if name in conf.env:
        return
    # startswith() instead of default[0] so an empty default cannot
    # raise IndexError
    if default.startswith('/'):
        conf.env[name] = default
    else:
        conf.env[name] = conf.env['PREFIX'] + default
@conf
def ADD_CFLAGS(conf, flags, testflags=False):
    '''add some CFLAGS to the command line
       optionally set testflags to ensure all the flags work

       Returns the flags that were added (possibly filtered), for
       consistency with ADD_LDFLAGS which already did so.
    '''
    if testflags:
        # keep only the flags the compiler actually accepts
        ok_flags=[]
        for f in flags.split():
            if CHECK_CFLAGS(conf, f):
                ok_flags.append(f)
        flags = ok_flags
    if not 'EXTRA_CFLAGS' in conf.env:
        conf.env['EXTRA_CFLAGS'] = []
    conf.env['EXTRA_CFLAGS'].extend(TO_LIST(flags))
    return flags
@conf
def ADD_LDFLAGS(conf, flags, testflags=False):
    '''add some LDFLAGS to the command line
       optionally set testflags to ensure all the flags work
       this will return the flags that are added, if any
    '''
    if testflags:
        # keep only the flags the linker actually accepts
        flags = [f for f in flags.split() if CHECK_LDFLAGS(conf, f)]
    if 'EXTRA_LDFLAGS' not in conf.env:
        conf.env['EXTRA_LDFLAGS'] = []
    conf.env['EXTRA_LDFLAGS'].extend(TO_LIST(flags))
    return flags
@conf
def ADD_EXTRA_INCLUDES(conf, includes):
    '''add some extra include directories to all builds'''
    # create the list on first use, then append
    if 'EXTRA_INCLUDES' not in conf.env:
        conf.env['EXTRA_INCLUDES'] = []
    conf.env['EXTRA_INCLUDES'] += TO_LIST(includes)
def CURRENT_CFLAGS(bld, target, cflags, hide_symbols=False):
    '''work out the current flags. local flags are added first'''
    # renamed the local from 'list' (shadowed the builtin) to 'extra'
    if 'EXTRA_CFLAGS' in bld.env:
        extra = bld.env['EXTRA_CFLAGS']
    else:
        extra = []
    ret = TO_LIST(cflags)
    ret.extend(extra)
    if hide_symbols and bld.env.HAVE_VISIBILITY_ATTR:
        # hide non-exported symbols when the compiler supports it
        ret.append('-fvisibility=hidden')
    return ret
@conf
def CHECK_CC_ENV(conf):
    """trim whitespaces from 'CC'.
    The build farm sometimes puts a space at the start"""
    cc = os.environ.get('CC')
    if cc:
        parts = TO_LIST(cc)
        if len(parts) == 1:
            # make for nicer logs if just a single command
            conf.env.CC = parts[0]
        else:
            conf.env.CC = parts
@conf
def SETUP_CONFIGURE_CACHE(conf, enable):
    '''enable/disable cache of configure results

    enable : True when the user passed -C to configure
    '''
    if enable:
        # when -C is chosen, we will use a private cache and will
        # not look into system includes. This roughly matches what
        # autoconf does with -C
        cache_path = os.path.join(conf.blddir, '.confcache')
        mkdir_p(cache_path)
        Options.cache_global = os.environ['WAFCACHE'] = cache_path
    else:
        # when -C is not chosen we will not cache configure checks
        # We set the recursion limit low to prevent waf from spending
        # a lot of time on the signatures of the files.
        Options.cache_global = os.environ['WAFCACHE'] = ''
        preproc.recursion_limit = 1
    # in either case we don't need to scan system includes
    preproc.go_absolute = False
@conf
def SAMBA_CHECK_UNDEFINED_SYMBOL_FLAGS(conf):
    '''work out the linker flags controlling undefined-symbol handling

    We don't want any libraries or modules to rely on runtime
    resolution of symbols.  OpenBSD's linker does not support these
    flags, so it is skipped entirely.
    '''
    if sys.platform.startswith("openbsd"):
        return
    conf.env.undefined_ldflags = conf.ADD_LDFLAGS('-Wl,-no-undefined', testflags=True)
    if conf.env.undefined_ignore_ldflags == []:
        # Darwin spelling: allow symbols to be resolved at load time
        if conf.CHECK_LDFLAGS(['-undefined', 'dynamic_lookup']):
            conf.env.undefined_ignore_ldflags = ['-undefined', 'dynamic_lookup']
ntdb-1.0/buildtools/wafsamba/samba_autoproto.py 0000664 0000000 0000000 00000001426 12241515307 0022016 0 ustar 00root root 0000000 0000000 # waf build tool for building automatic prototypes from C source
import Build
from samba_utils import *
def SAMBA_AUTOPROTO(bld, header, source):
    '''rule for samba prototype generation

    Registers a build rule that runs mkproto.pl over 'source' to
    produce the private prototype 'header'.
    '''
    bld.SET_BUILD_GROUP('prototypes')
    relpath = os_path_relpath(bld.curdir, bld.srcnode.abspath())
    name = os.path.join(relpath, header)
    SET_TARGET_TYPE(bld, name, 'PROTOTYPE')
    t = bld(
        name = name,
        source = source,
        target = header,
        on_results=True,
        ext_out='.c',
        before ='cc',
        rule = '${PERL} "${SCRIPT}/mkproto.pl" --srcdir=.. --builddir=. --public=/dev/null --private="${TGT}" ${SRC}'
        )
    # mkproto.pl lives in source4/script relative to the source root
    t.env.SCRIPT = os.path.join(bld.srcnode.abspath(), 'source4/script')
Build.BuildContext.SAMBA_AUTOPROTO = SAMBA_AUTOPROTO
ntdb-1.0/buildtools/wafsamba/samba_bundled.py 0000664 0000000 0000000 00000020622 12241515307 0021376 0 ustar 00root root 0000000 0000000 # functions to support bundled libraries
from Configure import conf
import sys, Logs
from samba_utils import *
def PRIVATE_NAME(bld, name, private_extension, private_library):
    '''return the name to use for a possibly-private library

    Private libraries deliberately keep the same name as their public
    counterparts; see tridge's junkcode shlib demo
    (git.samba.org/?p=tridge/junkcode.git;a=tree;f=shlib) and the
    samba-technical thread from January 2011 (msg 075816) for the
    rationale.  The extra arguments are kept for API compatibility.
    '''
    return name
def target_in_list(target, lst, default):
    '''look up 'target' in a build-control list

    Entries are checked in order: an exact match returns True,
    '!target' returns False, the wildcards 'ALL' and 'NONE' return
    True and False respectively.  When nothing matches, 'default' is
    returned.
    '''
    for entry in lst:
        if entry == target:
            return True
        if entry == '!' + target:
            return False
        if entry == 'ALL':
            return True
        if entry == 'NONE':
            return False
    return default
def BUILTIN_LIBRARY(bld, name):
    '''return True if a library should be builtin
       instead of being built as a shared lib'''
    builtin_list = bld.env.BUILTIN_LIBRARIES
    return target_in_list(name, builtin_list, False)
Build.BuildContext.BUILTIN_LIBRARY = BUILTIN_LIBRARY
def BUILTIN_DEFAULT(opt, builtins):
    '''set a comma separated default list of builtin libraries for this package'''
    # NOTE(review): Options.options is indexed like a dict here; this
    # relies on waf's option container supporting 'in' and item
    # assignment -- confirm against the bundled waf version
    if 'BUILTIN_LIBRARIES_DEFAULT' in Options.options:
        return
    Options.options['BUILTIN_LIBRARIES_DEFAULT'] = builtins
Options.Handler.BUILTIN_DEFAULT = BUILTIN_DEFAULT
def PRIVATE_EXTENSION_DEFAULT(opt, extension, noextension=''):
    '''set a default private library extension

    noextension : library names exempt from the private extension
    '''
    # only the first caller wins; later packages keep the first default
    if 'PRIVATE_EXTENSION_DEFAULT' in Options.options:
        return
    Options.options['PRIVATE_EXTENSION_DEFAULT'] = extension
    Options.options['PRIVATE_EXTENSION_EXCEPTION'] = noextension
Options.Handler.PRIVATE_EXTENSION_DEFAULT = PRIVATE_EXTENSION_DEFAULT
def minimum_library_version(conf, libname, default):
    '''allow override of minimum system library version

    --minimum-library-version takes a comma separated list of
    'libname:version' pairs; return the version for 'libname' when
    present, otherwise 'default'.
    '''
    minlist = Options.options.MINIMUM_LIBRARY_VERSION
    if not minlist:
        return default
    for entry in minlist.split(','):
        pair = entry.split(':')
        if len(pair) != 2:
            Logs.error("Bad syntax for --minimum-library-version of %s" % entry)
            sys.exit(1)
        if pair[0] == libname:
            return pair[1]
    return default
@conf
def LIB_MAY_BE_BUNDLED(conf, libname):
    '''True unless bundling of this library has been disabled'''
    bundled = conf.env.BUNDLED_LIBS
    if 'NONE' in bundled:
        return False
    if ('!%s' % libname) in bundled:
        return False
    return True
@conf
def LIB_MUST_BE_BUNDLED(conf, libname):
    '''True when the configure options force this library to be bundled'''
    bundled = conf.env.BUNDLED_LIBS
    return 'ALL' in bundled or libname in bundled
@conf
def LIB_MUST_BE_PRIVATE(conf, libname):
    '''True when the configure options force this library to be private'''
    private = conf.env.PRIVATE_LIBS
    return 'ALL' in private or libname in private
@conf
def CHECK_PREREQUISITES(conf, prereqs):
    '''return the subset of 'prereqs' not yet found as system libraries'''
    # a prerequisite is satisfied once FOUND_SYSTEMLIB_<name> is set
    return [syslib for syslib in TO_LIST(prereqs)
            if ('FOUND_SYSTEMLIB_%s' % syslib) not in conf.env]
@runonce
@conf
def CHECK_BUNDLED_SYSTEM_PKG(conf, libname, minversion='0.0.0',
                             onlyif=None, implied_deps=None, pkg=None):
    '''check if a library is available as a system library.
    This only tries using pkg-config

    onlyif       : other system libraries that must already be found
    implied_deps : dependencies implied when the system lib is used
    pkg          : pkg-config package name when it differs from libname
    '''
    if conf.LIB_MUST_BE_BUNDLED(libname):
        return False
    found = 'FOUND_SYSTEMLIB_%s' % libname
    # result is cached in the environment
    if found in conf.env:
        return conf.env[found]
    # see if the library should only use a system version if another dependent
    # system version is found. That prevents possible use of mixed library
    # versions
    if onlyif:
        missing = conf.CHECK_PREREQUISITES(onlyif)
        if missing:
            if not conf.LIB_MAY_BE_BUNDLED(libname):
                Logs.error('ERROR: Use of system library %s depends on missing system library/libraries %r' % (libname, missing))
                sys.exit(1)
            conf.env[found] = False
            return False
    minversion = minimum_library_version(conf, libname, minversion)
    msg = 'Checking for system %s' % libname
    if minversion != '0.0.0':
        msg += ' >= %s' % minversion
    if pkg is None:
        pkg = libname
    if conf.check_cfg(package=pkg,
                      args='"%s >= %s" --cflags --libs' % (pkg, minversion),
                      msg=msg, uselib_store=libname.upper()):
        conf.SET_TARGET_TYPE(libname, 'SYSLIB')
        conf.env[found] = True
        if implied_deps:
            conf.SET_SYSLIB_DEPS(libname, implied_deps)
        return True
    conf.env[found] = False
    if not conf.LIB_MAY_BE_BUNDLED(libname):
        Logs.error('ERROR: System library %s of version %s not found, and bundling disabled' % (libname, minversion))
        sys.exit(1)
    return False
@runonce
@conf
def CHECK_BUNDLED_SYSTEM(conf, libname, minversion='0.0.0',
                         checkfunctions=None, headers=None,
                         onlyif=None, implied_deps=None,
                         require_headers=True):
    '''check if a library is available as a system library.
    this first tries via pkg-config, then if that fails
    tries by testing for a specified function in the specified lib

    checkfunctions : functions that must be present in the library
    require_headers: also require 'headers' to be found
    '''
    if conf.LIB_MUST_BE_BUNDLED(libname):
        return False
    found = 'FOUND_SYSTEMLIB_%s' % libname
    # result is cached in the environment
    if found in conf.env:
        return conf.env[found]

    def check_functions_headers():
        '''helper function for CHECK_BUNDLED_SYSTEM'''
        if checkfunctions is None:
            return True
        if require_headers and headers and not conf.CHECK_HEADERS(headers, lib=libname):
            return False
        return conf.CHECK_FUNCS_IN(checkfunctions, libname, headers=headers,
                                   empty_decl=False, set_target=False)

    # see if the library should only use a system version if another dependent
    # system version is found. That prevents possible use of mixed library
    # versions
    if onlyif:
        missing = conf.CHECK_PREREQUISITES(onlyif)
        if missing:
            if not conf.LIB_MAY_BE_BUNDLED(libname):
                Logs.error('ERROR: Use of system library %s depends on missing system library/libraries %r' % (libname, missing))
                sys.exit(1)
            conf.env[found] = False
            return False
    minversion = minimum_library_version(conf, libname, minversion)
    msg = 'Checking for system %s' % libname
    if minversion != '0.0.0':
        msg += ' >= %s' % minversion
    # try pkgconfig first
    if (conf.check_cfg(package=libname,
                       args='"%s >= %s" --cflags --libs' % (libname, minversion),
                       msg=msg) and
        check_functions_headers()):
        conf.SET_TARGET_TYPE(libname, 'SYSLIB')
        conf.env[found] = True
        if implied_deps:
            conf.SET_SYSLIB_DEPS(libname, implied_deps)
        return True
    # fall back to a direct function check (no version check possible)
    if checkfunctions is not None:
        if check_functions_headers():
            conf.env[found] = True
            if implied_deps:
                conf.SET_SYSLIB_DEPS(libname, implied_deps)
            conf.SET_TARGET_TYPE(libname, 'SYSLIB')
            return True
    conf.env[found] = False
    if not conf.LIB_MAY_BE_BUNDLED(libname):
        Logs.error('ERROR: System library %s of version %s not found, and bundling disabled' % (libname, minversion))
        sys.exit(1)
    return False
def tuplize_version(version):
    '''convert a dotted version string into a tuple of ints for comparison'''
    return tuple(int(part) for part in version.split("."))
@runonce
@conf
def CHECK_BUNDLED_SYSTEM_PYTHON(conf, libname, modulename, minversion='0.0.0'):
    '''check if a python module is available on the system and
    has the specified minimum version.
    '''
    if conf.LIB_MUST_BE_BUNDLED(libname):
        return False
    # see if the library should only use a system version if another dependent
    # system version is found. That prevents possible use of mixed library
    # versions
    minversion = minimum_library_version(conf, libname, minversion)
    # import the module and read its __version__; any failure along the
    # way counts as "not found"
    try:
        m = __import__(modulename)
    except ImportError:
        found = False
    else:
        try:
            version = m.__version__
        except AttributeError:
            found = False
        else:
            found = tuplize_version(version) >= tuplize_version(minversion)
    if not found and not conf.LIB_MAY_BE_BUNDLED(libname):
        Logs.error('ERROR: Python module %s of version %s not found, and bundling disabled' % (libname, minversion))
        sys.exit(1)
    return found
def NONSHARED_BINARY(bld, name):
    '''return True if a binary should be built without non-system shared libs'''
    nonshared = bld.env.NONSHARED_BINARIES
    return target_in_list(name, nonshared, False)
Build.BuildContext.NONSHARED_BINARY = NONSHARED_BINARY
ntdb-1.0/buildtools/wafsamba/samba_conftests.py 0000664 0000000 0000000 00000036144 12241515307 0021777 0 ustar 00root root 0000000 0000000 # a set of config tests that use the samba_autoconf functions
# to test for commonly needed configuration options
import os, shutil, re
import Build, Configure, Utils
from Configure import conf
from samba_utils import *
def add_option(self, *k, **kw):
    '''syntax help: provide the "match" attribute to opt.add_option() so that folders can be added to specific config tests

    The 'match' keyword is consumed here (optparse does not know it)
    and stored on the returned option object.
    '''
    # pop() removes 'match' even when an explicit empty list is passed;
    # the previous get()+conditional-del left match=[] in kw, which
    # optparse rejects as an unknown keyword
    match = kw.pop('match', [])
    opt = self.parser.add_option(*k, **kw)
    opt.match = match
    return opt
Options.Handler.add_option = add_option
@conf
def check(self, *k, **kw):
    '''Override the waf defaults to inject --with-directory options

    When a configure test's msg matches an option registered with the
    'match' attribute (see add_option above), the directory given for
    that option is added to CPPPATH/LIBPATH for the test, and kept on
    success.
    '''
    if not 'env' in kw:
        kw['env'] = self.env.copy()
    # match the configuration test with specific options, for example:
    # --with-libiconv -> Options.options.iconv_open -> "Checking for library iconv"
    additional_dirs = []
    if 'msg' in kw:
        msg = kw['msg']
        for x in Options.Handler.parser.parser.option_list:
            if getattr(x, 'match', None) and msg in x.match:
                d = getattr(Options.options, x.dest, '')
                if d:
                    additional_dirs.append(d)
    # we add the additional dirs twice: once for the test data, and again if the compilation test succeeds below
    def add_options_dir(dirs, env):
        # prepend <dir>/include and <dir>/lib to the search paths
        for x in dirs:
             if not x in env.CPPPATH:
                 env.CPPPATH = [os.path.join(x, 'include')] + env.CPPPATH
             if not x in env.LIBPATH:
                 env.LIBPATH = [os.path.join(x, 'lib')] + env.LIBPATH
    add_options_dir(additional_dirs, kw['env'])
    self.validate_c(kw)
    self.check_message_1(kw['msg'])
    ret = None
    try:
        ret = self.run_c_code(*k, **kw)
    except Configure.ConfigurationError, e:
        self.check_message_2(kw['errmsg'], 'YELLOW')
        if 'mandatory' in kw and kw['mandatory']:
            if Logs.verbose > 1:
                raise
            else:
                self.fatal('the configuration failed (see %r)' % self.log.name)
    else:
        kw['success'] = ret
        self.check_message_2(self.ret_msg(kw['okmsg'], kw))
        # success! keep the CPPPATH/LIBPATH
        add_options_dir(additional_dirs, self.env)
    self.post_check(*k, **kw)
    if not kw.get('execute', False):
        return ret == 0
    return ret
@conf
def CHECK_ICONV(conf, define='HAVE_NATIVE_ICONV'):
    '''check if the iconv library is installed
       optionally pass a define'''
    # iconv_open may live in libc or in a separate libiconv
    have_iconv = conf.CHECK_FUNCS_IN('iconv_open', 'iconv', checklibc=True, headers='iconv.h')
    if not have_iconv:
        return False
    conf.DEFINE(define, 1)
    return True
@conf
def CHECK_LARGEFILE(conf, define='HAVE_LARGEFILE'):
    '''see what we need for largefile support

    Tries, in order: the flags reported by 'getconf LFS_CFLAGS',
    no extra flags, -D_FILE_OFFSET_BITS=64, -D_LARGE_FILES.
    The test program checks that off_t is at least 64 bits.
    '''
    getconf_cflags = conf.CHECK_COMMAND(['getconf', 'LFS_CFLAGS']);
    if getconf_cflags is not False:
        if (conf.CHECK_CODE('return !(sizeof(off_t) >= 8)',
                            define='WORKING_GETCONF_LFS_CFLAGS',
                            execute=True,
                            cflags=getconf_cflags,
                            msg='Checking getconf large file support flags work')):
            conf.ADD_CFLAGS(getconf_cflags)
            getconf_cflags_list=TO_LIST(getconf_cflags)
            # mirror any -D flags from getconf into config.h
            for flag in getconf_cflags_list:
                if flag[:2] == "-D":
                    flag_split = flag[2:].split('=')
                    if len(flag_split) == 1:
                        conf.DEFINE(flag_split[0], '1')
                    else:
                        conf.DEFINE(flag_split[0], flag_split[1])
    if conf.CHECK_CODE('return !(sizeof(off_t) >= 8)',
                       define,
                       execute=True,
                       msg='Checking for large file support without additional flags'):
        return True
    if conf.CHECK_CODE('return !(sizeof(off_t) >= 8)',
                       define,
                       execute=True,
                       cflags='-D_FILE_OFFSET_BITS=64',
                       msg='Checking for -D_FILE_OFFSET_BITS=64'):
        conf.DEFINE('_FILE_OFFSET_BITS', 64)
        return True
    if conf.CHECK_CODE('return !(sizeof(off_t) >= 8)',
                       define,
                       execute=True,
                       cflags='-D_LARGE_FILES',
                       msg='Checking for -D_LARGE_FILES'):
        conf.DEFINE('_LARGE_FILES', 1)
        return True
    return False
@conf
def CHECK_C_PROTOTYPE(conf, function, prototype, define, headers=None, msg=None):
    '''verify that a C prototype matches the one on the current system'''
    # the function must at least be declared, otherwise there is
    # nothing to compare against
    if not conf.CHECK_DECLS(function, headers=headers):
        return False
    if not msg:
        msg = 'Checking C prototype for %s' % function
    # re-declare the expected prototype and take the function's address;
    # a mismatch makes the (compile-only) check fail
    snippet = '%s; void *_x = (void *)%s' % (prototype, function)
    return conf.CHECK_CODE(snippet,
                           define=define,
                           local_include=False,
                           headers=headers,
                           link=False,
                           execute=False,
                           msg=msg)
@conf
def CHECK_CHARSET_EXISTS(conf, charset, outcharset='UCS-2LE', headers=None, define=None):
    '''check that a named charset is able to be used with iconv_open() for conversion
    to a target charset
    '''
    msg = 'Checking if can we convert from %s to %s' % (charset, outcharset)
    if define is None:
        # e.g. HAVE_CHARSET_UTF_8
        define = 'HAVE_CHARSET_%s' % charset.upper().replace('-','_')
    # run a tiny program that tries the conversion handle
    return conf.CHECK_CODE('''
                           iconv_t cd = iconv_open("%s", "%s");
                           if (cd == 0 || cd == (iconv_t)-1) return -1;
                           ''' % (charset, outcharset),
                           define=define,
                           execute=True,
                           msg=msg,
                           lib='iconv',
                           headers=headers)
def find_config_dir(conf):
    '''find a directory to run tests in

    Scans .conf_check_0 .. .conf_check_9999 under the build dir for a
    name that does not exist (removing stale ones on the way), creates
    it and returns its path.  Fatal configure error when no directory
    can be created or used.
    '''
    k = 0
    while k < 10000:
        dir = os.path.join(conf.blddir, '.conf_check_%d' % k)
        # remove any leftover from a previous configure run
        try:
            shutil.rmtree(dir)
        except OSError:
            pass
        # the name is free once stat() fails
        try:
            os.stat(dir)
        except:
            break
        k += 1
    try:
        os.makedirs(dir)
    except:
        conf.fatal('cannot create a configuration test folder %r' % dir)
    try:
        os.stat(dir)
    except:
        conf.fatal('cannot use the configuration test folder %r' % dir)
    return dir
@conf
def CHECK_SHLIB_INTRASINC_NAME_FLAGS(conf, msg):
    '''
    check if the waf default flags for setting the name of lib
    are ok
    '''
    # build a trivial versioned shared library (vnum triggers the
    # soname-setting linker flags being tested)
    snip = '''
int foo(int v) {
    return v * 2;
}
'''
    return conf.check(features='cc cshlib',vnum="1",fragment=snip,msg=msg)
@conf
def CHECK_NEED_LC(conf, msg):
    '''check if we need -lc when linking a shared library

    Builds a small shared library that calls fopen(); on some
    platforms this only links when libc is linked in explicitly.
    Returns True when the build succeeds.
    '''
    dir = find_config_dir(conf)
    env = conf.env
    bdir = os.path.join(dir, 'testbuild2')
    if not os.path.exists(bdir):
        os.makedirs(bdir)
    subdir = os.path.join(dir, "liblctest")
    os.makedirs(subdir)
    dest = open(os.path.join(subdir, 'liblc1.c'), 'w')
    # bug fix: the '<stdio.h>' header name had been lost from this
    # include line, so the generated C could never compile
    # (FILE/fopen undeclared)
    dest.write('#include <stdio.h>\nint lib_func(void) { FILE *f = fopen("foo", "r");}\n')
    dest.close()
    # set up a scratch build context mirroring the configure env
    bld = Build.BuildContext()
    bld.log = conf.log
    bld.all_envs.update(conf.all_envs)
    bld.all_envs['default'] = env
    bld.lst_variants = bld.all_envs.keys()
    bld.load_dirs(dir, bdir)
    bld.rescan(bld.srcnode)
    bld(features='cc cshlib',
        source='liblctest/liblc1.c',
        ldflags=conf.env['EXTRA_LDFLAGS'],
        target='liblc',
        name='liblc')
    try:
        bld.compile()
        conf.check_message(msg, '', True)
        return True
    except:
        conf.check_message(msg, '', False)
        return False
@conf
def CHECK_SHLIB_W_PYTHON(conf, msg):
    '''check if we need -undefined dynamic_lookup

    Builds a shared library that embeds Python and pokes at environ;
    on Darwin this only links with special undefined-symbol flags.
    '''
    dir = find_config_dir(conf)
    env = conf.env
    # bug fix: the header names had been stripped from the two include
    # lines by a text-mangling step; restored per the upstream snippet
    # (Python.h for PyImport_ImportModule, crt_externs.h for
    # _NSGetEnviron on Darwin)
    snip = '''
#include <Python.h>
#include <crt_externs.h>
#define environ (*_NSGetEnviron())

static PyObject *ldb_module = NULL;
int foo(int v) {
    extern char **environ;
    environ[0] = 1;
    ldb_module = PyImport_ImportModule("ldb");
    return v * 2;
}'''
    return conf.check(features='cc cshlib',uselib='PYEMBED',fragment=snip,msg=msg)
# this one is quite complex, and should probably be broken up
# into several parts. I'd quite like to create a set of CHECK_COMPOUND()
# functions that make writing complex compound tests like this much easier
@conf
def CHECK_LIBRARY_SUPPORT(conf, rpath=False, version_script=False, msg=None):
    '''see if the platform supports building libraries

    rpath          : also test rpath-based runtime linking
    version_script : also test --version-script linker support
    Builds a shared lib plus a program using it, runs the program
    (possibly through the cross-exec wrapper) and returns True when it
    exits successfully.
    '''
    if msg is None:
        if rpath:
            msg = "rpath library support"
        else:
            msg = "building library support"
    dir = find_config_dir(conf)
    bdir = os.path.join(dir, 'testbuild')
    if not os.path.exists(bdir):
        os.makedirs(bdir)
    env = conf.env
    # write the test sources: a lib returning 42 and a main checking it
    subdir = os.path.join(dir, "libdir")
    os.makedirs(subdir)
    dest = open(os.path.join(subdir, 'lib1.c'), 'w')
    dest.write('int lib_func(void) { return 42; }\n')
    dest.close()
    dest = open(os.path.join(dir, 'main.c'), 'w')
    dest.write('int main(void) {return !(lib_func() == 42);}\n')
    dest.close()
    # scratch build context mirroring the configure environment
    bld = Build.BuildContext()
    bld.log = conf.log
    bld.all_envs.update(conf.all_envs)
    bld.all_envs['default'] = env
    bld.lst_variants = bld.all_envs.keys()
    bld.load_dirs(dir, bdir)
    bld.rescan(bld.srcnode)
    ldflags = []
    if version_script:
        ldflags.append("-Wl,--version-script=%s/vscript" % bld.path.abspath())
        dest = open(os.path.join(dir,'vscript'), 'w')
        dest.write('TEST_1.0A2 { global: *; };\n')
        dest.close()
    bld(features='cc cshlib',
        source='libdir/lib1.c',
        target='libdir/lib1',
        ldflags=ldflags,
        name='lib1')
    o = bld(features='cc cprogram',
            source='main.c',
            target='prog1',
            uselib_local='lib1')
    if rpath:
        o.rpath=os.path.join(bdir, 'default/libdir')
    # compile the program
    try:
        bld.compile()
    except:
        conf.check_message(msg, '', False)
        return False
    # path for execution
    lastprog = o.link_task.outputs[0].abspath(env)
    if not rpath:
        # without rpath the library must be found via LD_LIBRARY_PATH
        if 'LD_LIBRARY_PATH' in os.environ:
            old_ld_library_path = os.environ['LD_LIBRARY_PATH']
        else:
            old_ld_library_path = None
        ADD_LD_LIBRARY_PATH(os.path.join(bdir, 'default/libdir'))
    # we need to run the program, try to get its result
    args = conf.SAMBA_CROSS_ARGS(msg=msg)
    proc = Utils.pproc.Popen([lastprog] + args, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE)
    (out, err) = proc.communicate()
    w = conf.log.write
    w(str(out))
    w('\n')
    w(str(err))
    w('\nreturncode %r\n' % proc.returncode)
    ret = (proc.returncode == 0)
    if not rpath:
        # restore the caller's LD_LIBRARY_PATH
        os.environ['LD_LIBRARY_PATH'] = old_ld_library_path or ''
    conf.check_message(msg, '', ret)
    return ret
@conf
def CHECK_PERL_MANPAGE(conf, msg=None, section=None):
    '''work out what extension perl uses for manpages

    With 'section' set, returns the extension string for that manpage
    section (parsed from the generated Makefile), otherwise returns
    True when manpage generation works at all.  Returns None on
    failure.
    '''
    if msg is None:
        if section:
            msg = "perl man%s extension" % section
        else:
            msg = "perl manpage generation"
    conf.check_message_1(msg)
    dir = find_config_dir(conf)
    bdir = os.path.join(dir, 'testbuild')
    if not os.path.exists(bdir):
        os.makedirs(bdir)
    # generate a minimal MakeMaker project and run 'perl Makefile.PL'
    dest = open(os.path.join(bdir, 'Makefile.PL'), 'w')
    dest.write("""
use ExtUtils::MakeMaker;
WriteMakefile(
    'NAME' => 'WafTest',
    'EXE_FILES' => [ 'WafTest' ]
);
""")
    dest.close()
    back = os.path.abspath('.')
    os.chdir(bdir)
    proc = Utils.pproc.Popen(['perl', 'Makefile.PL'],
                             stdout=Utils.pproc.PIPE,
                             stderr=Utils.pproc.PIPE)
    (out, err) = proc.communicate()
    os.chdir(back)
    ret = (proc.returncode == 0)
    if not ret:
        conf.check_message_2('not found', color='YELLOW')
        return
    if section:
        # parse MAN<section>EXT from the Makefile MakeMaker produced
        f = open(os.path.join(bdir,'Makefile'), 'r')
        man = f.read()
        f.close()
        m = re.search('MAN%sEXT\s+=\s+(\w+)' % section, man)
        if not m:
            conf.check_message_2('not found', color='YELLOW')
            return
        ext = m.group(1)
        conf.check_message_2(ext)
        return ext
    conf.check_message_2('ok')
    return True
@conf
def CHECK_COMMAND(conf, cmd, msg=None, define=None, on_target=True, boolean=False):
    '''run a command and return result

    Returns the stripped command output (or True/False when
    boolean=True); False when the command fails.  With 'define' set,
    the result is also recorded in config.h.
    NOTE(review): cmd appears to be used both as a list (extend/join)
    and as a shell string by some callers -- confirm each call site.
    '''
    if msg is None:
        msg = 'Checking %s' % ' '.join(cmd)
    conf.COMPOUND_START(msg)
    # copy so the caller's list is not mutated by the cross-args below
    cmd = cmd[:]
    if on_target:
        cmd.extend(conf.SAMBA_CROSS_ARGS(msg=msg))
    try:
        ret = Utils.cmd_output(cmd)
    except:
        conf.COMPOUND_END(False)
        return False
    if boolean:
        conf.COMPOUND_END('ok')
        if define:
            conf.DEFINE(define, '1')
    else:
        ret = ret.strip()
        conf.COMPOUND_END(ret)
        if define:
            conf.DEFINE(define, ret, quote=True)
    return ret
@conf
def CHECK_UNAME(conf):
    '''setup SYSTEM_UNAME_* defines

    Runs a test program per uname field (sysname, machine, release,
    version) and stores the output in SYSTEM_UNAME_<FIELD>.
    Returns False when any field could not be determined.
    '''
    ret = True
    for v in "sysname machine release version".split():
        if not conf.CHECK_CODE('''
                       struct utsname n;
                       if (uname(&n) == -1) return -1;
                       printf("%%s", n.%s);
                       ''' % v,
                               define='SYSTEM_UNAME_%s' % v.upper(),
                               execute=True,
                               define_ret=True,
                               quote=True,
                               headers='sys/utsname.h',
                               local_include=False,
                               msg="Checking uname %s type" % v):
            ret = False
    return ret
@conf
def CHECK_INLINE(conf):
    '''check for the right value for inline

    Tries 'inline', '__inline__' and '__inline' in turn; when the
    working keyword is not plain 'inline', a #define mapping is added.
    '''
    conf.COMPOUND_START('Checking for inline')
    for i in ['inline', '__inline__', '__inline']:
        ret = conf.CHECK_CODE('''
        typedef int foo_t;
        static %s foo_t static_foo () {return 0; }
        %s foo_t foo () {return 0; }''' % (i, i),
                              define='INLINE_MACRO',
                              addmain=False,
                              link=False)
        if ret:
            if i != 'inline':
                conf.DEFINE('inline', i, quote=False)
            break
    if not ret:
        conf.COMPOUND_END(ret)
    else:
        # report which spelling worked
        conf.COMPOUND_END(i)
    return ret
@conf
def CHECK_XSLTPROC_MANPAGES(conf):
    '''check if xsltproc can run with the given stylesheets

    Sets XSLTPROC_MANPAGES when xsltproc can load the docbook manpage
    stylesheet (requires a local catalog copy, since --nonet is used).
    '''
    if not conf.CONFIG_SET('XSLTPROC'):
        conf.find_program('xsltproc', var='XSLTPROC')
    if not conf.CONFIG_SET('XSLTPROC'):
        return False
    s='http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl'
    conf.CHECK_COMMAND('%s --nonet %s 2> /dev/null' % (conf.env.XSLTPROC, s),
                       msg='Checking for stylesheet %s' % s,
                       define='XSLTPROC_MANPAGES', on_target=False,
                       boolean=True)
    if not conf.CONFIG_SET('XSLTPROC_MANPAGES'):
        print "A local copy of the docbook.xsl wasn't found on your system" \
              " consider installing package like docbook-xsl"
ntdb-1.0/buildtools/wafsamba/samba_cross.py 0000664 0000000 0000000 00000010347 12241515307 0021115 0 ustar 00root root 0000000 0000000 # functions for handling cross-compilation
import Utils, Logs, sys, os, Options, re
from Configure import conf
# saved original Popen; installed lazily by SAMBA_CROSS_ARGS
real_Popen = None

# canned cross-compilation answers: (exit-status, stdout)
ANSWER_UNKNOWN = (254, "")
ANSWER_FAIL = (255, "")
ANSWER_OK = (0, "")

# set to True when a cross-answers lookup misses an entry
cross_answers_incomplete = False


def add_answer(ca_file, msg, answer):
    '''add an answer to a set of cross answers

    ca_file : path of the answers file (appended to)
    msg     : the question text used as the record key
    answer  : one of the ANSWER_* constants or a (retcode, stdout) tuple
    '''
    try:
        f = open(ca_file, 'a')
    except:
        Logs.error("Unable to open cross-answers file %s" % ca_file)
        sys.exit(1)
    if answer == ANSWER_OK:
        f.write('%s: OK\n' % msg)
    elif answer == ANSWER_UNKNOWN:
        f.write('%s: UNKNOWN\n' % msg)
    elif answer == ANSWER_FAIL:
        f.write('%s: FAIL\n' % msg)
    else:
        (retcode, retstring) = answer
        # bug fix: this record was written without a trailing newline,
        # so the next appended answer landed on the same line and the
        # file could no longer be parsed by cross_answer()
        f.write('%s: (%d, "%s")\n' % (msg, retcode, retstring))
    f.close()
def cross_answer(ca_file, msg):
    '''return a (retcode,retstring) tuple from a answers file

    Looks up 'msg' in the cross-answers file; accepted answer forms are
    OK/YES, FAIL/NO, UNKNOWN, a quoted string (meaning exit 0 with that
    output) or an explicit "(retcode, "output")" tuple.  A missing file
    or missing entry records ANSWER_UNKNOWN back into the file.
    '''
    try:
        f = open(ca_file, 'r')
    except:
        add_answer(ca_file, msg, ANSWER_UNKNOWN)
        return ANSWER_UNKNOWN
    for line in f:
        line = line.strip()
        if line == '' or line[0] == '#':
            continue
        if line.find(':') != -1:
            a = line.split(':')
            thismsg = a[0].strip()
            if thismsg != msg:
                continue
            ans = a[1].strip()
            if ans == "OK" or ans == "YES":
                f.close()
                return ANSWER_OK
            elif ans == "UNKNOWN":
                f.close()
                return ANSWER_UNKNOWN
            elif ans == "FAIL" or ans == "NO":
                f.close()
                return ANSWER_FAIL
            elif ans[0] == '"':
                # bug fix: these two branches returned without closing
                # the file, leaking the handle on every quoted answer
                f.close()
                return (0, ans.strip('"'))
            elif ans[0] == "'":
                f.close()
                return (0, ans.strip("'"))
            else:
                m = re.match('\(\s*(-?\d+)\s*,\s*\"(.*)\"\s*\)', ans)
                if m:
                    f.close()
                    return (int(m.group(1)), m.group(2))
                else:
                    raise Utils.WafError("Bad answer format '%s' in %s" % (line, ca_file))
    f.close()
    add_answer(ca_file, msg, ANSWER_UNKNOWN)
    return ANSWER_UNKNOWN
class cross_Popen(Utils.pproc.Popen):
    '''cross-compilation wrapper for Popen

    Substituted for the real Popen while cross-compiling so configure
    test binaries either run under an emulator (--cross-execute) or are
    replaced by canned answers (--cross-answers).
    '''
    def __init__(*k, **kw):
        (obj, args) = k
        if '--cross-execute' in args:
            # when --cross-execute is set, then change the arguments
            # to use the cross emulator
            i = args.index('--cross-execute')
            newargs = args[i+1].split()
            newargs.extend(args[0:i])
            args = newargs
        elif '--cross-answers' in args:
            # when --cross-answers is set, then change the arguments
            # to use the cross answers if available
            i = args.index('--cross-answers')
            ca_file = args[i+1]
            msg = args[i+2]
            ans = cross_answer(ca_file, msg)
            if ans == ANSWER_UNKNOWN:
                # remember that at least one answer was missing
                global cross_answers_incomplete
                cross_answers_incomplete = True
            (retcode, retstring) = ans
            # fake the test binary with a shell that reproduces the answer
            args = ['/bin/sh', '-c', "echo -n '%s'; exit %d" % (retstring, retcode)]
        real_Popen.__init__(*(obj, args), **kw)
@conf
def SAMBA_CROSS_ARGS(conf, msg=None):
    '''get exec_args to pass when running cross compiled binaries

    Returns [] when not cross-compiling.  Otherwise installs the
    cross_Popen wrapper (once, saving the real Popen in real_Popen) and
    returns the extra argv elements that tell the wrapper how to handle
    the test binary.  Raises WafError if neither --cross-execute nor
    --cross-answers was configured, or if msg is missing in answers mode.
    '''
    if not conf.env.CROSS_COMPILE:
        return []

    global real_Popen
    if real_Popen is None:
        # first call: monkey-patch the waf Popen with our wrapper
        real_Popen = Utils.pproc.Popen
        Utils.pproc.Popen = cross_Popen

    ret = []
    if conf.env.CROSS_EXECUTE:
        ret.extend(['--cross-execute', conf.env.CROSS_EXECUTE])
    elif conf.env.CROSS_ANSWERS:
        if msg is None:
            raise Utils.WafError("Cannot have NULL msg in cross-answers")
        # answers file path is relative to where waf was launched
        ret.extend(['--cross-answers', os.path.join(Options.launch_dir, conf.env.CROSS_ANSWERS), msg])

    if ret == []:
        raise Utils.WafError("Cannot cross-compile without either --cross-execute or --cross-answers")

    return ret
@conf
def SAMBA_CROSS_CHECK_COMPLETE(conf):
    '''fail the configure run if any cross-answers lookup went unanswered'''
    global cross_answers_incomplete
    unanswered = conf.env.CROSS_COMPILE and cross_answers_incomplete
    if unanswered:
        raise Utils.WafError("Cross answers file %s is incomplete" % conf.env.CROSS_ANSWERS)
    return True
ntdb-1.0/buildtools/wafsamba/samba_deps.py 0000664 0000000 0000000 00000121755 12241515307 0020725 0 ustar 00root root 0000000 0000000 # Samba automatic dependency handling and project rules
import Build, os, sys, re, Environment, Logs, time
from samba_utils import *
from samba_autoconf import *
from samba_bundled import BUILTIN_LIBRARY
@conf
def ADD_GLOBAL_DEPENDENCY(ctx, dep):
    '''register dep as a dependency of every binary and library target'''
    if 'GLOBAL_DEPENDENCIES' not in ctx.env:
        ctx.env.GLOBAL_DEPENDENCIES = []
    ctx.env.GLOBAL_DEPENDENCIES.append(dep)
@conf
def BREAK_CIRCULAR_LIBRARY_DEPENDENCIES(ctx):
    '''mark the build as allowing circular library dependencies to be broken automatically'''
    ctx.env.ALLOW_CIRCULAR_LIB_DEPENDENCIES = True
@conf
def SET_SYSLIB_DEPS(conf, target, deps):
    '''record the implied dependencies for the system library named target'''
    LOCAL_CACHE(conf, 'SYSLIB_DEPS')[target] = deps
def expand_subsystem_deps(bld):
    '''expand the reverse dependencies resulting from subsystem
       attributes of modules. This is walking over the complete list
       of declared subsystems, and expands the samba_deps_extended list for any
       module<->subsystem dependencies'''

    subsystem_list = LOCAL_CACHE(bld, 'INIT_FUNCTIONS')
    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')

    for subsystem_name in subsystem_list:
        bld.ASSERT(subsystem_name in targets, "Subsystem target %s not declared" % subsystem_name)
        type = targets[subsystem_name]
        if type == 'DISABLED' or type == 'EMPTY':
            continue

        # for example,
        #    subsystem_name = dcerpc_server (a subsystem)
        #    subsystem      = dcerpc_server (a subsystem object)
        #    module_name    = rpc_epmapper (a module within the dcerpc_server subsystem)
        #    module         = rpc_epmapper (a module object within the dcerpc_server subsystem)

        subsystem = bld.name_to_obj(subsystem_name, bld.env)
        bld.ASSERT(subsystem is not None, "Unable to find subsystem %s" % subsystem_name)
        for d in subsystem_list[subsystem_name]:
            module_name = d['TARGET']
            module_type = targets[module_name]
            if module_type in ['DISABLED', 'EMPTY']:
                continue
            bld.ASSERT(subsystem is not None,
                       "Subsystem target %s for %s (%s) not found" % (subsystem_name, module_name, module_type))
            if module_type in ['SUBSYSTEM']:
                # if a module is a plain object type (not a library) then the
                # subsystem it is part of needs to have it as a dependency, so targets
                # that depend on this subsystem get the modules of that subsystem
                subsystem.samba_deps_extended.append(module_name)
        # dedupe: modules may have been appended more than once
        subsystem.samba_deps_extended = unique_list(subsystem.samba_deps_extended)
def build_dependencies(self):
    '''This builds the dependency list for a target. It runs after all the targets are declared

    The reason this is not just done in the SAMBA_*() rules is that we have no way of knowing
    the full dependency list for a target until we have all of the targets declared.
    '''

    if self.samba_type in ['LIBRARY', 'BINARY', 'PYTHON']:
        # linked targets: translate the final_* sets into the waf
        # uselib/uselib_local/add_objects attributes
        self.uselib = list(self.final_syslibs)
        self.uselib_local = list(self.final_libs)
        self.add_objects = list(self.final_objects)

        # extra link flags from pkg_config
        libs = self.final_syslibs.copy()

        (ccflags, ldflags, cpppath) = library_flags(self, list(libs))
        new_ldflags = getattr(self, 'samba_ldflags', [])[:]
        new_ldflags.extend(ldflags)
        self.ldflags = new_ldflags

        # targets that allow undefined symbols drop the "no undefined"
        # linker flags and add the explicit ignore flags instead
        if getattr(self, 'allow_undefined_symbols', False) and self.env.undefined_ldflags:
            for f in self.env.undefined_ldflags:
                self.ldflags.remove(f)

        if getattr(self, 'allow_undefined_symbols', False) and self.env.undefined_ignore_ldflags:
            for f in self.env.undefined_ignore_ldflags:
                self.ldflags.append(f)

        debug('deps: computed dependencies for target %s: uselib=%s uselib_local=%s add_objects=%s',
              self.sname, self.uselib, self.uselib_local, self.add_objects)

    if self.samba_type in ['SUBSYSTEM']:
        # this is needed for the ccflags of libs that come from pkg_config
        self.uselib = list(self.final_syslibs)
        self.uselib.extend(list(self.direct_syslibs))
        for lib in self.final_libs:
            t = self.bld.name_to_obj(lib, self.bld.env)
            self.uselib.extend(list(t.final_syslibs))
        self.uselib = unique_list(self.uselib)

    # waf expects uselib names in upper case
    if getattr(self, 'uselib', None):
        up_list = []
        for l in self.uselib:
            up_list.append(l.upper())
        self.uselib = up_list
def build_includes(self):
    '''This builds the right set of includes for a target.

    One tricky part of this is that the includes= attribute for a
    target needs to use paths which are relative to that targets
    declaration directory (which we can get at via t.path).

    The way this works is the includes list gets added as
    samba_includes in the main build task declaration. Then this
    function runs after all of the tasks are declared, and it
    processes the samba_includes attribute to produce a includes=
    attribute
    '''

    if getattr(self, 'samba_includes', None) is None:
        return

    bld = self.bld

    # every include-relevant dependency (libs and objects)
    inc_deps = includes_objects(bld, self, set(), {})

    includes = []

    # maybe add local includes
    if getattr(self, 'local_include', True) and getattr(self, 'local_include_first', True):
        includes.append('.')

    includes.extend(self.samba_includes_extended)

    if 'EXTRA_INCLUDES' in bld.env and getattr(self, 'global_include', True):
        includes.extend(bld.env['EXTRA_INCLUDES'])

    # '#' is the waf notation for the top of the source tree
    includes.append('#')

    inc_set = set()
    inc_abs = []

    # collect the absolute include paths of every dependency, deduped
    # while preserving discovery order
    for d in inc_deps:
        t = bld.name_to_obj(d, bld.env)
        bld.ASSERT(t is not None, "Unable to find dependency %s for %s" % (d, self.sname))
        inclist = getattr(t, 'samba_includes_extended', [])[:]
        if getattr(t, 'local_include', True):
            inclist.append('.')
        if inclist == []:
            continue
        tpath = t.samba_abspath
        for inc in inclist:
            npath = tpath + '/' + inc
            if not npath in inc_set:
                inc_abs.append(npath)
                inc_set.add(npath)

    # re-express the dependency paths relative to this target's directory
    mypath = self.path.abspath(bld.env)
    for inc in inc_abs:
        relpath = os_path_relpath(inc, mypath)
        includes.append(relpath)

    if getattr(self, 'local_include', True) and not getattr(self, 'local_include_first', True):
        includes.append('.')

    # now transform the includes list to be relative to the top directory
    # which is represented by '#' in waf. This allows waf to cache the
    # includes lists more efficiently
    includes_top = []
    for i in includes:
        if i[0] == '#':
            # some are already top based
            includes_top.append(i)
            continue
        absinc = os.path.join(self.path.abspath(), i)
        relinc = os_path_relpath(absinc, self.bld.srcnode.abspath())
        includes_top.append('#' + relinc)

    self.includes = unique_list(includes_top)
    debug('deps: includes for target %s: includes=%s',
          self.sname, self.includes)
def add_init_functions(self):
    '''This builds the right set of init functions

    Generates the -DSTATIC_<name>_MODULES[...] cflags that tell the C code
    which statically-linked module init functions exist for this target.
    '''

    bld = self.bld

    subsystems = LOCAL_CACHE(bld, 'INIT_FUNCTIONS')

    # cope with the separated object lists from BINARY and LIBRARY targets
    sname = self.sname
    if sname.endswith('.objlist'):
        sname = sname[0:-8]

    # collect the module/subsystem names this target provides init
    # functions for
    modules = []
    if sname in subsystems:
        modules.append(sname)

    m = getattr(self, 'samba_modules', None)
    if m is not None:
        modules.extend(TO_LIST(m))

    m = getattr(self, 'samba_subsystem', None)
    if m is not None:
        modules.append(m)

    # sentinel terminates the generated init-function array (usually NULL)
    sentinel = getattr(self, 'init_function_sentinel', 'NULL')

    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
    cflags = getattr(self, 'samba_cflags', [])[:]

    if modules == []:
        # no modules: define an empty module list for this target
        sname = sname.replace('-','_')
        sname = sname.replace('/','_')
        cflags.append('-DSTATIC_%s_MODULES=%s' % (sname, sentinel))
        if sentinel == 'NULL':
            cflags.append('-DSTATIC_%s_MODULES_PROTO=' % sname)
        self.ccflags = cflags
        return

    for m in modules:
        bld.ASSERT(m in subsystems,
                   "No init_function defined for module '%s' in target '%s'" % (m, self.sname))
        # only modules that are not DISABLED contribute an init function
        init_fn_list = []
        for d in subsystems[m]:
            if targets[d['TARGET']] != 'DISABLED':
                init_fn_list.append(d['INIT_FUNCTION'])
        if init_fn_list == []:
            cflags.append('-DSTATIC_%s_MODULES=%s' % (m, sentinel))
            if sentinel == 'NULL':
                # NOTE(review): unlike the empty-modules branch above, this
                # define has no trailing '=' so the macro expands to 1 rather
                # than empty — looks inconsistent; verify against the C code
                # that consumes STATIC_%s_MODULES_PROTO
                cflags.append('-DSTATIC_%s_MODULES_PROTO' % m)
        else:
            cflags.append('-DSTATIC_%s_MODULES=%s' % (m, ','.join(init_fn_list) + ',' + sentinel))
            # build the prototype list for all init functions of this module
            proto=''
            for f in init_fn_list:
                proto = proto + '_MODULE_PROTO(%s)' % f
            cflags.append('-DSTATIC_%s_MODULES_PROTO=%s' % (m, proto))
    self.ccflags = cflags
def check_duplicate_sources(bld, tgt_list):
    '''see if we are compiling the same source file more than once
       without an allow_duplicates attribute'''

    debug('deps: checking for duplicate sources')

    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
    ret = True

    global tstart

    # first pass: per-target, error out if a target lists the same source
    # file twice; record each target's normalized source set
    for t in tgt_list:
        source_list = TO_LIST(getattr(t, 'source', ''))
        tpath = os.path.normpath(os_path_relpath(t.path.abspath(bld.env), t.env.BUILD_DIRECTORY + '/default'))
        obj_sources = set()
        for s in source_list:
            p = os.path.normpath(os.path.join(tpath, s))
            if p in obj_sources:
                Logs.error("ERROR: source %s appears twice in target '%s'" % (p, t.sname))
                sys.exit(1)
            obj_sources.add(p)
        t.samba_source_set = obj_sources

    subsystems = {}

    # build a list of targets that each source file is part of
    for t in tgt_list:
        sources = []
        if not targets[t.sname] in [ 'LIBRARY', 'BINARY', 'PYTHON' ]:
            continue
        for obj in t.add_objects:
            t2 = t.bld.name_to_obj(obj, bld.env)
            source_set = getattr(t2, 'samba_source_set', set())
            for s in source_set:
                if not s in subsystems:
                    subsystems[s] = {}
                if not t.sname in subsystems[s]:
                    subsystems[s][t.sname] = []
                subsystems[s][t.sname].append(t2.sname)

    # a source in more than one target is only a warning (and only with
    # --show-duplicates); a source in more than one subsystem of a single
    # target is a hard error
    for s in subsystems:
        if len(subsystems[s]) > 1 and Options.options.SHOW_DUPLICATES:
            Logs.warn("WARNING: source %s is in more than one target: %s" % (s, subsystems[s].keys()))
        for tname in subsystems[s]:
            if len(subsystems[s][tname]) > 1:
                raise Utils.WafError("ERROR: source %s is in more than one subsystem of target '%s': %s" % (s, tname, subsystems[s][tname]))

    return ret
def check_orphaned_targets(bld, tgt_list):
    '''warn about declared build targets that no other target makes use of'''
    target_dict = LOCAL_CACHE(bld, 'TARGET_TYPE')
    debug('deps: checking for orphaned targets')
    # these kinds are allowed to be unreferenced
    standalone_kinds = ('BINARY', 'LIBRARY', 'MODULE', 'ET', 'PYTHON')
    for t in tgt_list:
        if getattr(t, 'samba_used', False):
            continue
        ttype = target_dict[t.sname]
        if ttype in standalone_kinds:
            continue
        # pidl-generated helper targets are expected to be unreferenced
        if not t.sname.startswith('PIDL_'):
            Logs.warn("Target %s of type %s is unused by any other target" % (t.sname, ttype))
def check_group_ordering(bld, tgt_list):
    '''see if we have any dependencies that violate the group ordering

    It is an error for a target to depend on a target from a later
    build group
    '''

    # reverse-lookup the name of a waf build group object
    def group_name(g):
        tm = bld.task_manager
        return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0]

    # tag every task generator with the name of its build group
    for g in bld.task_manager.groups:
        gname = group_name(g)
        for t in g.tasks_gen:
            t.samba_group = gname

    # map group name -> ordinal position, so ordering can be compared
    grp_map = {}
    idx = 0
    for g in bld.task_manager.groups:
        name = group_name(g)
        grp_map[name] = idx
        idx += 1

    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')

    ret = True
    for t in tgt_list:
        tdeps = getattr(t, 'add_objects', []) + getattr(t, 'uselib_local', [])
        for d in tdeps:
            t2 = bld.name_to_obj(d, bld.env)
            if t2 is None:
                continue
            map1 = grp_map[t.samba_group]
            map2 = grp_map[t2.samba_group]

            # depending on a target declared in a later group is an error
            if map2 > map1:
                Logs.error("Target %r in build group %r depends on target %r from later build group %r" % (
                           t.sname, t.samba_group, t2.sname, t2.samba_group))
                ret = False

    return ret
def show_final_deps(bld, tgt_list):
    '''log the fully resolved dependency lists of every linked target'''
    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
    linked_kinds = ('LIBRARY', 'BINARY', 'PYTHON', 'SUBSYSTEM')
    for t in tgt_list:
        if targets[t.sname] not in linked_kinds:
            continue
        debug('deps: final dependencies for target %s: uselib=%s uselib_local=%s add_objects=%s',
              t.sname, t.uselib, getattr(t, 'uselib_local', []), getattr(t, 'add_objects', []))
def add_samba_attributes(bld, tgt_list):
    '''make sure every target carries the samba bookkeeping attributes'''
    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
    for t in tgt_list:
        # prefer the explicit name, falling back to the target attribute
        t.sname = t.name if t.name != '' else t.target
        t.samba_type = targets[t.sname]
        t.samba_abspath = t.path.abspath(bld.env)
        # work on copies so later expansion never mutates the originals
        t.samba_deps_extended = t.samba_deps[:]
        t.samba_includes_extended = TO_LIST(t.samba_includes)[:]
        t.ccflags = getattr(t, 'samba_cflags', '')
def replace_grouping_libraries(bld, tgt_list):
    '''replace dependencies based on grouping libraries

    If a library is marked as a grouping library, then any target that
    depends on a subsystem that is part of that grouping library gets
    that dependency replaced with a dependency on the grouping library
    '''

    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')

    grouping = {}

    # find our list of grouping libraries, mapped from the subsystems they depend on
    for t in tgt_list:
        if not getattr(t, 'grouping_library', False):
            continue
        for dep in t.samba_deps_extended:
            bld.ASSERT(dep in targets, "grouping library target %s not declared in %s" % (dep, t.sname))
            if targets[dep] == 'SUBSYSTEM':
                grouping[dep] = t.sname

    # now replace any dependencies on elements of grouping libraries
    for t in tgt_list:
        for i in range(len(t.samba_deps_extended)):
            dep = t.samba_deps_extended[i]
            if dep in grouping:
                # the grouping library itself keeps its subsystem deps
                if t.sname != grouping[dep]:
                    debug("deps: target %s: replacing dependency %s with grouping library %s" % (t.sname, dep, grouping[dep]))
                    t.samba_deps_extended[i] = grouping[dep]
def build_direct_deps(bld, tgt_list):
    '''build the direct_objects and direct_libs sets for each target'''

    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
    syslib_deps = LOCAL_CACHE(bld, 'SYSLIB_DEPS')

    # anything the global dependencies themselves depend on must not get
    # the global deps added back, or we would create loops
    global_deps = bld.env.GLOBAL_DEPENDENCIES
    global_deps_exclude = set()
    for dep in global_deps:
        t = bld.name_to_obj(dep, bld.env)
        for d in t.samba_deps:
            # prevent loops from the global dependencies list
            global_deps_exclude.add(d)
            global_deps_exclude.add(d + '.objlist')

    for t in tgt_list:
        t.direct_objects = set()
        t.direct_libs = set()
        t.direct_syslibs = set()
        deps = t.samba_deps_extended[:]
        if getattr(t, 'samba_use_global_deps', False) and not t.sname in global_deps_exclude:
            deps.extend(global_deps)
        for d in deps:
            # ignore self-dependencies
            if d == t.sname: continue
            if not d in targets:
                Logs.error("Unknown dependency '%s' in '%s'" % (d, t.sname))
                sys.exit(1)
            if targets[d] in [ 'EMPTY', 'DISABLED' ]:
                continue
            if targets[d] == 'PYTHON' and targets[t.sname] != 'PYTHON' and t.sname.find('.objlist') == -1:
                # this check should be more restrictive, but for now we have pidl-generated python
                # code that directly depends on other python modules
                Logs.error('ERROR: Target %s has dependency on python module %s' % (t.sname, d))
                sys.exit(1)
            if targets[d] == 'SYSLIB':
                t.direct_syslibs.add(d)
                # a SYSLIB may imply further deps registered via SET_SYSLIB_DEPS
                if d in syslib_deps:
                    for implied in TO_LIST(syslib_deps[d]):
                        if BUILTIN_LIBRARY(bld, implied):
                            t.direct_objects.add(implied)
                        elif targets[implied] == 'SYSLIB':
                            t.direct_syslibs.add(implied)
                        elif targets[implied] in ['LIBRARY', 'MODULE']:
                            t.direct_libs.add(implied)
                        else:
                            Logs.error('Implied dependency %s in %s is of type %s' % (
                                implied, t.sname, targets[implied]))
                            sys.exit(1)
                continue
            t2 = bld.name_to_obj(d, bld.env)
            if t2 is None:
                Logs.error("no task %s of type %s in %s" % (d, targets[d], t.sname))
                sys.exit(1)
            # libraries/modules are linked against; everything else is
            # pulled in as objects
            if t2.samba_type in [ 'LIBRARY', 'MODULE' ]:
                t.direct_libs.add(d)
            elif t2.samba_type in [ 'SUBSYSTEM', 'ASN1', 'PYTHON' ]:
                t.direct_objects.add(d)
    debug('deps: built direct dependencies')
def dependency_loop(loops, t, target):
    '''record in the loops dictionary that target participates in a
    dependency loop reachable from t (ignoring trivial self-loops)'''
    source = t.sname
    if source == target:
        return
    # setdefault creates the entry on first sight; set.add is idempotent
    loops.setdefault(target, set()).add(source)
def indirect_libs(bld, t, chain, loops):
    '''recursively calculate the indirect library dependencies for a target

    An indirect library is a library that results from a dependency on
    a subsystem.  The result is memoized on t.indirect_libs; chain holds
    the names currently on the recursion stack so cycles are diverted
    into the loops dictionary instead of recursing forever.
    '''

    ret = getattr(t, 'indirect_libs', None)
    if ret is not None:
        # already computed for this target
        return ret

    ret = set()
    for obj in t.direct_objects:
        if obj in chain:
            # cycle detected: record it and skip
            dependency_loop(loops, t, obj)
            continue
        chain.add(obj)
        t2 = bld.name_to_obj(obj, bld.env)
        r2 = indirect_libs(bld, t2, chain, loops)
        chain.remove(obj)
        ret = ret.union(t2.direct_libs)
        ret = ret.union(r2)

    # also walk the expanded (indirect) object set
    for obj in indirect_objects(bld, t, set(), loops):
        if obj in chain:
            dependency_loop(loops, t, obj)
            continue
        chain.add(obj)
        t2 = bld.name_to_obj(obj, bld.env)
        r2 = indirect_libs(bld, t2, chain, loops)
        chain.remove(obj)
        ret = ret.union(t2.direct_libs)
        ret = ret.union(r2)

    t.indirect_libs = ret
    return ret
def indirect_objects(bld, t, chain, loops):
    '''recursively calculate the indirect object dependencies for a target

    indirect objects are the set of objects from expanding the
    subsystem dependencies.  Memoized on t.indirect_objects; chain is the
    recursion stack used to divert cycles into the loops dictionary.
    '''

    ret = getattr(t, 'indirect_objects', None)
    if ret is not None: return ret

    ret = set()
    for lib in t.direct_objects:
        if lib in chain:
            # cycle detected: record it and skip
            dependency_loop(loops, t, lib)
            continue
        chain.add(lib)
        t2 = bld.name_to_obj(lib, bld.env)
        r2 = indirect_objects(bld, t2, chain, loops)
        chain.remove(lib)
        ret = ret.union(t2.direct_objects)
        ret = ret.union(r2)

    t.indirect_objects = ret
    return ret
def extended_objects(bld, t, chain):
    '''recursively calculate the extended object dependencies for a target

    extended objects are the union of:
       - direct objects
       - indirect objects
       - direct and indirect objects of all direct and indirect libraries

    Memoized on t.extended_objects; chain prevents infinite recursion
    through library cycles.
    '''

    ret = getattr(t, 'extended_objects', None)
    if ret is not None: return ret

    ret = set()
    ret = ret.union(t.final_objects)

    for lib in t.final_libs:
        if lib in chain:
            continue
        t2 = bld.name_to_obj(lib, bld.env)
        chain.add(lib)
        r2 = extended_objects(bld, t2, chain)
        chain.remove(lib)
        ret = ret.union(t2.final_objects)
        ret = ret.union(r2)

    t.extended_objects = ret
    return ret
def includes_objects(bld, t, chain, inc_loops):
    '''recursively calculate the includes object dependencies for a target

    includes dependencies come from either library or object dependencies.
    Memoized on t.includes_objects; chain is the recursion stack and any
    cycles found are recorded in inc_loops.
    '''
    ret = getattr(t, 'includes_objects', None)
    if ret is not None:
        return ret

    # start with our own direct objects and libs
    ret = t.direct_objects.copy()
    ret = ret.union(t.direct_libs)

    for obj in t.direct_objects:
        if obj in chain:
            # cycle detected: record it and skip
            dependency_loop(inc_loops, t, obj)
            continue
        chain.add(obj)
        t2 = bld.name_to_obj(obj, bld.env)
        r2 = includes_objects(bld, t2, chain, inc_loops)
        chain.remove(obj)
        ret = ret.union(t2.direct_objects)
        ret = ret.union(r2)

    for lib in t.direct_libs:
        if lib in chain:
            dependency_loop(inc_loops, t, lib)
            continue
        chain.add(lib)
        t2 = bld.name_to_obj(lib, bld.env)
        if t2 is None:
            targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
            Logs.error('Target %s of type %s not found in direct_libs for %s' % (
                lib, targets[lib], t.sname))
            sys.exit(1)
        r2 = includes_objects(bld, t2, chain, inc_loops)
        chain.remove(lib)
        ret = ret.union(t2.direct_objects)
        ret = ret.union(r2)

    t.includes_objects = ret
    return ret
def break_dependency_loops(bld, tgt_list):
    '''find and break dependency loops

    Runs the recursive dependency walkers (which record any cycles they
    hit), removes the looping members from each participating target,
    then expands the loop sets and re-adds replacement dependencies so
    nothing reachable is lost.
    '''
    loops = {}
    inc_loops = {}

    # build up the list of loops - the walkers memoize their results and
    # populate loops/inc_loops as a side effect
    for t in tgt_list:
        indirect_objects(bld, t, set(), loops)
        indirect_libs(bld, t, set(), loops)
        includes_objects(bld, t, set(), inc_loops)

    # break the loops: drop the looping names from each affected target
    for t in tgt_list:
        if t.sname in loops:
            for attr in ['direct_objects', 'indirect_objects', 'direct_libs', 'indirect_libs']:
                objs = getattr(t, attr, set())
                setattr(t, attr, objs.difference(loops[t.sname]))

    for loop in loops:
        debug('deps: Found dependency loops for target %s : %s', loop, loops[loop])

    for loop in inc_loops:
        debug('deps: Found include loops for target %s : %s', loop, inc_loops[loop])

    # expand the loops mapping by one level
    for loop in loops.copy():
        for tgt in loops[loop]:
            if tgt in loops:
                loops[loop] = loops[loop].union(loops[tgt])

    for loop in inc_loops.copy():
        for tgt in inc_loops[loop]:
            if tgt in inc_loops:
                inc_loops[loop] = inc_loops[loop].union(inc_loops[tgt])

    # expand indirect subsystem and library loops
    for loop in loops.copy():
        t = bld.name_to_obj(loop, bld.env)
        if t.samba_type in ['SUBSYSTEM']:
            loops[loop] = loops[loop].union(t.indirect_objects)
            loops[loop] = loops[loop].union(t.direct_objects)
        if t.samba_type in ['LIBRARY','PYTHON']:
            loops[loop] = loops[loop].union(t.indirect_libs)
            loops[loop] = loops[loop].union(t.direct_libs)
        if loop in loops[loop]:
            loops[loop].remove(loop)

    # expand indirect includes loops
    for loop in inc_loops.copy():
        t = bld.name_to_obj(loop, bld.env)
        inc_loops[loop] = inc_loops[loop].union(t.includes_objects)
        if loop in inc_loops[loop]:
            inc_loops[loop].remove(loop)

    # add in the replacement dependencies: any target that still refers to
    # a loop member gets the rest of that loop's expansion added back
    for t in tgt_list:
        for loop in loops:
            for attr in ['indirect_objects', 'indirect_libs']:
                objs = getattr(t, attr, set())
                if loop in objs:
                    diff = loops[loop].difference(objs)
                    if t.sname in diff:
                        diff.remove(t.sname)
                    if diff:
                        debug('deps: Expanded target %s of type %s from loop %s by %s', t.sname, t.samba_type, loop, diff)
                        objs = objs.union(diff)
                setattr(t, attr, objs)

        for loop in inc_loops:
            objs = getattr(t, 'includes_objects', set())
            if loop in objs:
                diff = inc_loops[loop].difference(objs)
                if t.sname in diff:
                    diff.remove(t.sname)
                if diff:
                    debug('deps: Expanded target %s includes of type %s from loop %s by %s', t.sname, t.samba_type, loop, diff)
                    objs = objs.union(diff)
                    setattr(t, 'includes_objects', objs)
def reduce_objects(bld, tgt_list):
    '''reduce objects by looking for indirect object dependencies

    Removes from each linked target any object it would also receive via
    one of its libraries.  Returns True if anything changed (the caller
    iterates until a fixed point is reached).
    '''
    rely_on = {}

    # invalidate the memoized extended_objects before recomputing
    for t in tgt_list:
        t.extended_objects = None

    changed = False

    for type in ['BINARY', 'PYTHON', 'LIBRARY']:
        for t in tgt_list:
            if t.samba_type != type: continue
            # if we will indirectly link to a target then we don't need it
            new = t.final_objects.copy()
            for l in t.final_libs:
                t2 = bld.name_to_obj(l, bld.env)
                t2_obj = extended_objects(bld, t2, set())
                dup = new.intersection(t2_obj)
                if t.sname in rely_on:
                    # don't remove objects this target is already relied on for
                    dup = dup.difference(rely_on[t.sname])
                if dup:
                    debug('deps: removing dups from %s of type %s: %s also in %s %s',
                          t.sname, t.samba_type, dup, t2.samba_type, l)
                    new = new.difference(dup)
                    changed = True
                    if not l in rely_on:
                        rely_on[l] = set()
                    rely_on[l] = rely_on[l].union(dup)
            t.final_objects = new

    if not changed:
        return False

    # add back in any objects that were relied upon by the reduction rules
    for r in rely_on:
        t = bld.name_to_obj(r, bld.env)
        t.final_objects = t.final_objects.union(rely_on[r])

    return True
def show_library_loop(bld, lib1, lib2, path, seen):
    '''print every dependency chain through which lib1 loops back to lib2'''
    t = bld.name_to_obj(lib1, bld.env)
    if lib2 not in getattr(t, 'final_libs', set()):
        return

    for dep in t.samba_deps_extended:
        if dep in seen:
            continue
        seen.add(dep)
        extended = '%s=>%s' % (path, dep)
        if dep == lib2:
            Logs.warn('library loop path: ' + extended)
            return
        # keep walking deeper; seen guards against revisiting nodes
        show_library_loop(bld, dep, lib2, extended, seen)
        seen.remove(dep)
def calculate_final_deps(bld, tgt_list, loops):
    '''calculate the final library and object dependencies'''
    for t in tgt_list:
        # start with the maximum possible list
        t.final_libs = t.direct_libs.union(indirect_libs(bld, t, set(), loops))
        t.final_objects = t.direct_objects.union(indirect_objects(bld, t, set(), loops))

    for t in tgt_list:
        # don't depend on ourselves
        if t.sname in t.final_libs:
            t.final_libs.remove(t.sname)
        if t.sname in t.final_objects:
            t.final_objects.remove(t.sname)

    # handle any non-shared binaries
    for t in tgt_list:
        if t.samba_type == 'BINARY' and bld.NONSHARED_BINARY(t.sname):
            subsystem_list = LOCAL_CACHE(bld, 'INIT_FUNCTIONS')
            targets = LOCAL_CACHE(bld, 'TARGET_TYPE')

            # replace lib deps with objlist deps
            for l in t.final_libs:
                objname = l + '.objlist'
                t2 = bld.name_to_obj(objname, bld.env)
                if t2 is None:
                    Logs.error('ERROR: subsystem %s not found' % objname)
                    sys.exit(1)
                t.final_objects.add(objname)
                t.final_objects = t.final_objects.union(extended_objects(bld, t2, set()))
                if l in subsystem_list:
                    # its a subsystem - we also need the contents of any modules
                    for d in subsystem_list[l]:
                        module_name = d['TARGET']
                        if targets[module_name] == 'LIBRARY':
                            objname = module_name + '.objlist'
                        elif targets[module_name] == 'SUBSYSTEM':
                            objname = module_name
                        else:
                            continue
                        t2 = bld.name_to_obj(objname, bld.env)
                        if t2 is None:
                            Logs.error('ERROR: subsystem %s not found' % objname)
                            sys.exit(1)
                        t.final_objects.add(objname)
                        t.final_objects = t.final_objects.union(extended_objects(bld, t2, set()))
            # nothing is linked dynamically into a non-shared binary
            t.final_libs = set()

    # find any library loops
    for t in tgt_list:
        if t.samba_type in ['LIBRARY', 'PYTHON']:
            for l in t.final_libs.copy():
                t2 = bld.name_to_obj(l, bld.env)
                if t.sname in t2.final_libs:
                    if getattr(bld.env, "ALLOW_CIRCULAR_LIB_DEPENDENCIES", False):
                        # we could break this in either direction. If one of the libraries
                        # has a version number, and will this be distributed publicly, then
                        # we should make it the lower level library in the DAG
                        Logs.warn('deps: removing library loop %s from %s' % (t.sname, t2.sname))
                        dependency_loop(loops, t, t2.sname)
                        t2.final_libs.remove(t.sname)
                    else:
                        Logs.error('ERROR: circular library dependency between %s and %s'
                                   % (t.sname, t2.sname))
                        show_library_loop(bld, t.sname, t2.sname, t.sname, set())
                        show_library_loop(bld, t2.sname, t.sname, t2.sname, set())
                        sys.exit(1)

    for loop in loops:
        debug('deps: Found dependency loops for target %s : %s', loop, loops[loop])

    # we now need to make corrections for any library loops we broke up
    # any target that depended on the target of the loop and doesn't
    # depend on the source of the loop needs to get the loop source added
    # NOTE(review): 'BINARY' appears twice in this list, giving binaries a
    # second pass after libraries have been expanded — presumably
    # intentional, but worth confirming
    for type in ['BINARY','PYTHON','LIBRARY','BINARY']:
        for t in tgt_list:
            if t.samba_type != type: continue
            for loop in loops:
                if loop in t.final_libs:
                    diff = loops[loop].difference(t.final_libs)
                    if t.sname in diff:
                        diff.remove(t.sname)
                    # NOTE(review): this check is duplicated; the second
                    # occurrence is redundant but harmless
                    if t.sname in diff:
                        diff.remove(t.sname)
                    # make sure we don't recreate the loop again!
                    for d in diff.copy():
                        t2 = bld.name_to_obj(d, bld.env)
                        if t2.samba_type == 'LIBRARY':
                            if t.sname in t2.final_libs:
                                debug('deps: removing expansion %s from %s', d, t.sname)
                                diff.remove(d)
                    if diff:
                        debug('deps: Expanded target %s by loop %s libraries (loop %s) %s', t.sname, loop,
                              loops[loop], diff)
                        t.final_libs = t.final_libs.union(diff)

    # remove objects that are also available in linked libs
    count = 0
    while reduce_objects(bld, tgt_list):
        count += 1
        if count > 100:
            Logs.warn("WARNING: Unable to remove all inter-target object duplicates")
            break
    debug('deps: Object reduction took %u iterations', count)

    # add in any syslib dependencies
    for t in tgt_list:
        if not t.samba_type in ['BINARY','PYTHON','LIBRARY','SUBSYSTEM']:
            continue
        syslibs = set()
        for d in t.final_objects:
            t2 = bld.name_to_obj(d, bld.env)
            syslibs = syslibs.union(t2.direct_syslibs)

        # this adds the indirect syslibs as well, which may not be needed
        # depending on the linker flags
        for d in t.final_libs:
            t2 = bld.name_to_obj(d, bld.env)
            syslibs = syslibs.union(t2.direct_syslibs)
        t.final_syslibs = syslibs

    # find any unresolved library loops
    lib_loop_error = False
    for t in tgt_list:
        if t.samba_type in ['LIBRARY', 'PYTHON']:
            for l in t.final_libs.copy():
                t2 = bld.name_to_obj(l, bld.env)
                if t.sname in t2.final_libs:
                    Logs.error('ERROR: Unresolved library loop %s from %s' % (t.sname, t2.sname))
                    lib_loop_error = True
    if lib_loop_error:
        sys.exit(1)

    debug('deps: removed duplicate dependencies')
def show_dependencies(bld, target, seen):
    '''recursively print the direct dependencies of target and its objects'''
    if target in seen:
        return

    t = bld.name_to_obj(target, bld.env)
    if t is None:
        Logs.error("ERROR: Unable to find target '%s'" % target)
        sys.exit(1)

    Logs.info('%s(OBJECTS): %s' % (target, t.direct_objects))
    Logs.info('%s(LIBS): %s' % (target, t.direct_libs))
    Logs.info('%s(SYSLIBS): %s' % (target, t.direct_syslibs))

    seen.add(target)

    # recurse into every direct object dependency
    for child in t.direct_objects:
        show_dependencies(bld, child, seen)
def show_object_duplicates(bld, tgt_list):
    '''show a list of object files that are included in more than
       one library or binary'''

    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')

    # map object name -> set of targets that link it
    used_by = {}

    Logs.info("showing duplicate objects")

    for t in tgt_list:
        if not targets[t.sname] in [ 'LIBRARY', 'PYTHON' ]:
            continue
        for n in getattr(t, 'final_objects', set()):
            t2 = bld.name_to_obj(n, bld.env)
            if not n in used_by:
                used_by[n] = set()
            used_by[n].add(t.sname)

    for n in used_by:
        if len(used_by[n]) > 1:
            Logs.info("target '%s' is used by %s" % (n, used_by[n]))

    Logs.info("showing indirect dependency counts (sorted by count)")

    # sort descending by indirect object count
    # NOTE: sorted(cmp=...) is Python 2 only, matching the rest of this file
    def indirect_count(t1, t2):
        return len(t2.indirect_objects) - len(t1.indirect_objects)

    sorted_list = sorted(tgt_list, cmp=indirect_count)
    for t in sorted_list:
        if len(t.indirect_objects) > 1:
            Logs.info("%s depends on %u indirect objects" % (t.sname, len(t.indirect_objects)))
######################################################################
# this provides a way to save our dependency calculations between runs

# bump this to invalidate previously saved dependency information
savedeps_version = 3
# target attributes that feed into the dependency calculation
savedeps_inputs  = ['samba_deps', 'samba_includes', 'local_include', 'local_include_first', 'samba_cflags',
                    'source', 'grouping_library', 'samba_ldflags', 'allow_undefined_symbols',
                    'use_global_deps', 'global_include' ]
# target attributes produced by the calculation (restored on a cache hit)
savedeps_outputs = ['uselib', 'uselib_local', 'add_objects', 'includes', 'ccflags', 'ldflags', 'samba_deps_extended']
# per-target environment variables to save and restore
savedeps_outenv  = ['INC_PATHS']
# global environment variables whose change invalidates the cache
savedeps_envvars = ['NONSHARED_BINARIES', 'GLOBAL_DEPENDENCIES', 'EXTRA_CFLAGS', 'EXTRA_LDFLAGS', 'EXTRA_INCLUDES' ]
# LOCAL_CACHE names that must be unchanged for the cache to be valid
savedeps_caches  = ['GLOBAL_DEPENDENCIES', 'TARGET_TYPE', 'INIT_FUNCTIONS', 'SYSLIB_DEPS']
# files whose modification time invalidates the cache
savedeps_files   = ['buildtools/wafsamba/samba_deps.py']
def save_samba_deps(bld, tgt_list):
    '''save the dependency calculations between builds, to make
       further builds faster'''
    denv = Environment.Environment()

    # record the metadata load_samba_deps() uses to validate the cache
    denv.version = savedeps_version
    denv.savedeps_inputs = savedeps_inputs
    denv.savedeps_outputs = savedeps_outputs
    denv.input = {}
    denv.output = {}
    denv.outenv = {}
    denv.caches = {}
    denv.envvar = {}
    denv.files = {}

    # snapshot the mtimes of the files that define the calculation
    for f in savedeps_files:
        denv.files[f] = os.stat(os.path.join(bld.srcnode.abspath(), f)).st_mtime

    for c in savedeps_caches:
        denv.caches[c] = LOCAL_CACHE(bld, c)

    for e in savedeps_envvars:
        denv.envvar[e] = bld.env[e]

    for t in tgt_list:
        # save all the input attributes for each target
        tdeps = {}
        for attr in savedeps_inputs:
            v = getattr(t, attr, None)
            if v is not None:
                tdeps[attr] = v
        if tdeps != {}:
            denv.input[t.sname] = tdeps

        # save all the output attributes for each target
        tdeps = {}
        for attr in savedeps_outputs:
            v = getattr(t, attr, None)
            if v is not None:
                tdeps[attr] = v
        if tdeps != {}:
            denv.output[t.sname] = tdeps

        # save the relevant per-target env vars
        tdeps = {}
        for attr in savedeps_outenv:
            if attr in t.env:
                tdeps[attr] = t.env[attr]
        if tdeps != {}:
            denv.outenv[t.sname] = tdeps

    depsfile = os.path.join(bld.bdir, "sambadeps")
    denv.store(depsfile)
def load_samba_deps(bld, tgt_list):
    '''load a previous set of build dependencies if possible

    Returns True and restores the saved output attributes onto the
    targets when the cache is valid; returns False (so the full
    calculation runs again) when anything relevant has changed.
    '''
    depsfile = os.path.join(bld.bdir, "sambadeps")
    denv = Environment.Environment()
    try:
        debug('deps: checking saved dependencies')
        denv.load(depsfile)
        if (denv.version != savedeps_version or
            denv.savedeps_inputs != savedeps_inputs or
            denv.savedeps_outputs != savedeps_outputs):
            return False
    except:
        # missing or unreadable cache: recalculate
        return False

    # check if critical files have changed
    for f in savedeps_files:
        if f not in denv.files:
            return False
        if denv.files[f] != os.stat(os.path.join(bld.srcnode.abspath(), f)).st_mtime:
            return False

    # check if caches are the same
    for c in savedeps_caches:
        if c not in denv.caches or denv.caches[c] != LOCAL_CACHE(bld, c):
            return False

    # check if the global env vars are the same
    for e in savedeps_envvars:
        if e not in denv.envvar or denv.envvar[e] != bld.env[e]:
            return False

    # check inputs are the same
    for t in tgt_list:
        tdeps = {}
        for attr in savedeps_inputs:
            v = getattr(t, attr, None)
            if v is not None:
                tdeps[attr] = v
        if t.sname in denv.input:
            olddeps = denv.input[t.sname]
        else:
            olddeps = {}
        if tdeps != olddeps:
            #print '%s: \ntdeps=%s \nodeps=%s' % (t.sname, tdeps, olddeps)
            return False

    # put outputs in place
    for t in tgt_list:
        if not t.sname in denv.output: continue
        tdeps = denv.output[t.sname]
        for a in tdeps:
            setattr(t, a, tdeps[a])

    # put output env vars in place
    for t in tgt_list:
        if not t.sname in denv.outenv: continue
        tdeps = denv.outenv[t.sname]
        for a in tdeps:
            t.env[a] = tdeps[a]

    debug('deps: loaded saved dependencies')
    return True
def check_project_rules(bld):
    '''check the project rules - ensuring the targets are sane

    Runs the full dependency-calculation pipeline over all build targets,
    aborting the build (sys.exit) on duplicate sources or bad group
    ordering. Skipped entirely when load_samba_deps() can restore a valid
    saved state, unless --showdeps/--show-duplicates forces a full run.
    '''
    loops = {}
    # NOTE(review): inc_loops is never used in this function — presumably
    # leftover from an earlier revision; confirm before removing
    inc_loops = {}

    tgt_list = get_tgt_list(bld)

    add_samba_attributes(bld, tgt_list)

    # these display options need the full calculation even when the
    # cached dependencies are valid
    force_project_rules = (Options.options.SHOWDEPS or
                           Options.options.SHOW_DUPLICATES)

    if not force_project_rules and load_samba_deps(bld, tgt_list):
        return

    global tstart
    # time.clock() is used for the progress timestamps below
    tstart = time.clock()

    bld.new_rules = True
    Logs.info("Checking project rules ...")

    debug('deps: project rules checking started')

    # each pass mutates the targets in tgt_list; the order matters
    expand_subsystem_deps(bld)
    debug("deps: expand_subsystem_deps: %f" % (time.clock() - tstart))
    replace_grouping_libraries(bld, tgt_list)
    debug("deps: replace_grouping_libraries: %f" % (time.clock() - tstart))
    build_direct_deps(bld, tgt_list)
    debug("deps: build_direct_deps: %f" % (time.clock() - tstart))
    break_dependency_loops(bld, tgt_list)
    debug("deps: break_dependency_loops: %f" % (time.clock() - tstart))

    if Options.options.SHOWDEPS:
        show_dependencies(bld, Options.options.SHOWDEPS, set())

    calculate_final_deps(bld, tgt_list, loops)
    debug("deps: calculate_final_deps: %f" % (time.clock() - tstart))

    if Options.options.SHOW_DUPLICATES:
        show_object_duplicates(bld, tgt_list)

    # run the various attribute generators
    for f in [ build_dependencies, build_includes, add_init_functions ]:
        debug('deps: project rules checking %s', f)
        for t in tgt_list: f(t)
        debug("deps: %s: %f" % (f, time.clock() - tstart))

    debug('deps: project rules stage1 completed')

    #check_orphaned_targets(bld, tgt_list)

    if not check_duplicate_sources(bld, tgt_list):
        Logs.error("Duplicate sources present - aborting")
        sys.exit(1)
    debug("deps: check_duplicate_sources: %f" % (time.clock() - tstart))

    if not check_group_ordering(bld, tgt_list):
        Logs.error("Bad group ordering - aborting")
        sys.exit(1)
    debug("deps: check_group_ordering: %f" % (time.clock() - tstart))

    show_final_deps(bld, tgt_list)
    debug("deps: show_final_deps: %f" % (time.clock() - tstart))

    debug('deps: project rules checking completed - %u targets checked',
          len(tgt_list))

    # cache the results for the next build, unless installing (install
    # runs may carry different state)
    if not bld.is_install:
        save_samba_deps(bld, tgt_list)
    debug("deps: save_samba_deps: %f" % (time.clock() - tstart))

    Logs.info("Project rules pass")
def CHECK_PROJECT_RULES(bld):
    '''enable checking of project targets for sanity

    Registers check_project_rules() as a waf pre-build function.
    Safe to call more than once: a flag on bld.env makes this a no-op
    after the first registration.
    '''
    if not bld.env.added_project_rules:
        bld.env.added_project_rules = True
        bld.add_pre_fun(check_project_rules)
# export as a method on waf's BuildContext so wscripts can call bld.CHECK_PROJECT_RULES()
Build.BuildContext.CHECK_PROJECT_RULES = CHECK_PROJECT_RULES
ntdb-1.0/buildtools/wafsamba/samba_dist.py 0000664 0000000 0000000 00000017336 12241515307 0020734 0 ustar 00root root 0000000 0000000 # customised version of 'waf dist' for Samba tools
# uses git ls-files to get file lists
import Utils, os, sys, tarfile, stat, Scripting, Logs, Options
from samba_utils import *
# module-level packaging configuration, set once by the DIST_DIRS /
# DIST_FILES / DIST_BLACKLIST conf methods below; each is a
# whitespace-separated string (None/"" until configured)
dist_dirs = None
dist_files = None
dist_blacklist = ""
def add_symlink(tar, fname, abspath, basedir):
    '''handle symlinks to directories that may move during packaging

    Returns False when abspath is not a symlink (caller should add it as a
    regular file); otherwise adds a link entry to tar (with ownership
    normalised to root) and returns True.
    '''
    if not os.path.islink(abspath):
        return False
    tinfo = tar.gettarinfo(name=abspath, arcname=fname)
    tgt = os.readlink(abspath)

    if dist_dirs:
        # we need to find the target relative to the main directory
        # this is here to cope with symlinks into the buildtools
        # directory from within the standalone libraries in Samba. For example,
        # a symlink to ../../builtools/scripts/autogen-waf.sh needs
        # to be rewritten as a symlink to buildtools/scripts/autogen-waf.sh
        # when the tarball for talloc is built

        # the filename without the appname-version
        rel_fname = '/'.join(fname.split('/')[1:])

        # join this with the symlink target
        tgt_full = os.path.join(os.path.dirname(rel_fname), tgt)

        # join with the base directory
        tgt_base = os.path.normpath(os.path.join(basedir, tgt_full))

        # see if this is inside one of our dist_dirs
        for dir in dist_dirs.split():
            # each entry may be "srcdir:destdir"; bare entries map to '.'
            if dir.find(':') != -1:
                destdir=dir.split(':')[1]
                dir=dir.split(':')[0]
            else:
                destdir = '.'
            if dir == basedir:
                # internal links don't get rewritten
                continue
            if dir == tgt_base[0:len(dir)] and tgt_base[len(dir)] == '/':
                # rewrite the link target to point inside the packaged tree
                new_tgt = destdir + tgt_base[len(dir):]
                tinfo.linkname = new_tgt
                break

    # don't leak the local user/group into release tarballs
    tinfo.uid = 0
    tinfo.gid = 0
    tinfo.uname = 'root'
    tinfo.gname = 'root'
    tar.addfile(tinfo)
    return True
def add_tarfile(tar, fname, abspath, basedir):
    '''add a file to the tarball, normalising ownership to root

    Symlinks are delegated to add_symlink(); regular files are streamed
    from abspath into the archive under the archive name fname. Exits the
    build with an error if the file is missing.
    '''
    if add_symlink(tar, fname, abspath, basedir):
        return
    try:
        tinfo = tar.gettarinfo(name=abspath, arcname=fname)
    except OSError:
        Logs.error('Unable to find file %s - missing from git checkout?' % abspath)
        sys.exit(1)
    # don't leak the local user/group into release tarballs
    tinfo.uid = 0
    tinfo.gid = 0
    tinfo.uname = 'root'
    tinfo.gname = 'root'
    # open in binary mode: tarfile streams raw bytes, and text mode could
    # corrupt content on platforms that translate line endings
    fh = open(abspath, 'rb')
    try:
        tar.addfile(tinfo, fileobj=fh)
    finally:
        # ensure the handle is released even if addfile() raises
        fh.close()
def vcs_dir_contents(path):
    """Return the versioned files under a path.

    Walks up from path looking for a .git or .bzr directory, then lists
    the versioned files via the matching VCS command.

    :return: List of paths relative to path
    """
    repo = path
    # NOTE(review): the "/" sentinel assumes a POSIX-style filesystem
    # root — verify if this ever needs to run on Windows
    while repo != "/":
        if os.path.isdir(os.path.join(repo, ".git")):
            ls_files_cmd = [ 'git', 'ls-files', '--full-name',
                             os_path_relpath(path, repo) ]
            cwd = None
            # point git at the repository we found, regardless of cwd
            env = dict(os.environ)
            env["GIT_DIR"] = os.path.join(repo, ".git")
            break
        elif os.path.isdir(os.path.join(repo, ".bzr")):
            ls_files_cmd = [ 'bzr', 'ls', '--recursive', '--versioned',
                             os_path_relpath(path, repo)]
            cwd = repo
            env = None
            break
        repo = os.path.dirname(repo)
    if repo == "/":
        raise Exception("unsupported or no vcs for %s" % path)
    return Utils.cmd_output(ls_files_cmd, cwd=cwd, env=env).split()
def dist(appname='', version=''):
    '''create a release tarball for the configured DIST_DIRS/DIST_FILES,
    named <appname>-<version>.tar.gz (or a signed .tar + .asc when
    --sign-release is given). Returns the created tarball name.'''

    def add_files_to_tarball(tar, srcdir, srcsubdir, dstdir, dstsubdir, blacklist, files):
        # add each versioned file, skipping blacklisted files/directories
        if blacklist is None:
            blacklist = []
        for f in files:
            abspath = os.path.join(srcdir, f)

            if srcsubdir != '.':
                f = f[len(srcsubdir)+1:]

            # Remove files in the blacklist
            if f in blacklist:
                continue
            blacklisted = False
            # Remove directories in the blacklist
            for d in blacklist:
                if f.startswith(d):
                    blacklisted = True
            if blacklisted:
                continue
            if os.path.isdir(abspath):
                continue
            if dstsubdir != '.':
                f = dstsubdir + '/' + f
            fname = dstdir + '/' + f
            add_tarfile(tar, fname, abspath, srcsubdir)

    def list_directory_files(abspath):
        # recursively collect all regular files below abspath
        out_files = []
        for root, dirs, files in os.walk(abspath):
            for f in files:
                out_files.append(os.path.join(root, f))
        return out_files

    if not isinstance(appname, str) or not appname:
        # this copes with a mismatch in the calling arguments for dist()
        appname = Utils.g_module.APPNAME
        version = Utils.g_module.VERSION
    if not version:
        version = Utils.g_module.VERSION

    srcdir = os.path.normpath(os.path.join(os.path.dirname(Utils.g_module.root_path), Utils.g_module.srcdir))

    if not dist_dirs:
        Logs.error('You must use samba_dist.DIST_DIRS() to set which directories to package')
        sys.exit(1)

    dist_base = '%s-%s' % (appname, version)

    if Options.options.SIGN_RELEASE:
        # signing works on the uncompressed tar; it is gzipped afterwards
        dist_name = '%s.tar' % (dist_base)
        tar = tarfile.open(dist_name, 'w')
    else:
        dist_name = '%s.tar.gz' % (dist_base)
        tar = tarfile.open(dist_name, 'w:gz')

    blacklist = dist_blacklist.split()

    for dir in dist_dirs.split():
        # each entry may be "srcdir:destdir"; bare entries map to '.'
        if dir.find(':') != -1:
            destdir=dir.split(':')[1]
            dir=dir.split(':')[0]
        else:
            destdir = '.'
        absdir = os.path.join(srcdir, dir)
        try:
            files = vcs_dir_contents(absdir)
        except Exception, e:
            Logs.error('unable to get contents of %s: %s' % (absdir, e))
            sys.exit(1)
        add_files_to_tarball(tar, srcdir, dir, dist_base, destdir, blacklist, files)

    if dist_files:
        for file in dist_files.split():
            # each entry may be "srcfile:destfile"
            if file.find(':') != -1:
                destfile = file.split(':')[1]
                file = file.split(':')[0]
            else:
                destfile = file

            absfile = os.path.join(srcdir, file)

            if os.path.isdir(absfile):
                destdir = destfile
                dir = file
                # NOTE(review): this walks the relative path 'dir', so it
                # presumably relies on cwd being srcdir — confirm; absfile
                # looks like the intended argument
                files = list_directory_files(dir)
                add_files_to_tarball(tar, srcdir, dir, dist_base, destdir, blacklist, files)
            else:
                fname = dist_base + '/' + destfile
                add_tarfile(tar, fname, absfile, destfile)

    tar.close()

    if Options.options.SIGN_RELEASE:
        import gzip
        try:
            os.unlink(dist_name + '.asc')
        except OSError:
            pass
        # sign the uncompressed tarball, then gzip it manually so the
        # signature matches the .tar contents
        cmd = "gpg --detach-sign --armor " + dist_name
        os.system(cmd)
        uncompressed_tar = open(dist_name, 'rb')
        compressed_tar = gzip.open(dist_name + '.gz', 'wb')
        while 1:
            buffer = uncompressed_tar.read(1048576)
            if buffer:
                compressed_tar.write(buffer)
            else:
                break
        uncompressed_tar.close()
        compressed_tar.close()
        os.unlink(dist_name)
        Logs.info('Created %s.gz %s.asc' % (dist_name, dist_name))
        dist_name = dist_name + '.gz'
    else:
        Logs.info('Created %s' % dist_name)

    return dist_name
@conf
def DIST_DIRS(dirs):
    '''set the directories to package, relative to top srcdir

    Only the first caller takes effect; later calls are ignored.
    '''
    global dist_dirs
    if dist_dirs:
        return
    dist_dirs = dirs
@conf
def DIST_FILES(files, extend=False):
    '''set additional files for packaging, relative to top srcdir

    The first call establishes the list; later calls only take effect
    when extend=True, appending to the existing set.
    '''
    global dist_files
    if not dist_files:
        dist_files = files
        return
    if extend:
        dist_files = " ".join([dist_files, files])
@conf
def DIST_BLACKLIST(blacklist):
    '''set the files to exclude from packaging, relative to top srcdir

    Only the first caller takes effect; later calls are ignored.
    '''
    global dist_blacklist
    if dist_blacklist:
        return
    dist_blacklist = blacklist
# replace waf's standard 'dist' command with the Samba-specific one above
Scripting.dist = dist
ntdb-1.0/buildtools/wafsamba/samba_headers.py 0000664 0000000 0000000 00000014642 12241515307 0021401 0 ustar 00root root 0000000 0000000 # specialist handling of header files for Samba
import Build, re, Task, TaskGen, shutil, sys, Logs
from samba_utils import *
def header_install_path(header, header_path):
    '''find the installation path for a header, given a header_path option

    header_path may be empty/None (install into the current path), a plain
    string (used directly), or a list of (patterns, destination) pairs
    whose wildcard patterns are matched against the header name.
    '''
    if not header_path:
        return ''
    if isinstance(header_path, list):
        # first wildcard pattern that matches the header wins
        for (patterns, dest) in header_path:
            for pattern in TO_LIST(patterns):
                if fnmatch.fnmatch(header, pattern):
                    return dest
        # nothing matched: default to current path
        return ''
    return header_path
# matches '#include "..."' lines (double-quoted form only)
re_header = re.compile('^\s*#\s*include[ \t]*"([^"]+)"', re.I | re.M)

# a dictionary mapping source header paths to public header paths
header_map = {}

def find_suggested_header(hpath):
    '''find a suggested header path to use

    Looks up headers in header_map with the same basename as hpath and
    returns candidate '#include' spellings (public <...> form followed by
    the private "..." form) for each match.
    '''
    wanted = os.path.basename(hpath)
    suggestions = []
    for private in header_map:
        if os.path.basename(private) != wanted:
            continue
        suggestions.append('<%s>' % header_map[private])
        suggestions.append('"%s"' % private)
    return suggestions
def create_public_header(task):
    '''create a public header from a private one, output within the build tree

    Copies the private header line by line, rewriting '#include "..."'
    references to their public '<...>' equivalents via header_map. Raises
    Utils.WafError (and removes the partial output) for includes that
    cannot be resolved, unless public_headers_allow_broken is set.
    '''
    src = task.inputs[0].abspath(task.env)
    tgt = task.outputs[0].bldpath(task.env)

    if os.path.exists(tgt):
        os.unlink(tgt)

    relsrc = os_path_relpath(src, task.env.TOPDIR)

    infile = open(src, mode='r')
    outfile = open(tgt, mode='w')
    linenumber = 0

    # include references are resolved relative to these prefixes;
    # EXTRA_INCLUDES entries starting with '#' are srcdir-relative
    search_paths = [ '', task.env.RELPATH ]
    for i in task.env.EXTRA_INCLUDES:
        if i.startswith('#'):
            search_paths.append(i[1:])

    for line in infile:
        linenumber += 1

        # allow some straight substitutions
        if task.env.public_headers_replace and line.strip() in task.env.public_headers_replace:
            outfile.write(task.env.public_headers_replace[line.strip()] + '\n')
            continue

        # see if its an include line
        m = re_header.match(line)
        if m is None:
            outfile.write(line)
            continue

        # its an include, get the header path
        hpath = m.group(1)
        if hpath.startswith("bin/default/"):
            hpath = hpath[12:]

        # some are always allowed
        if task.env.public_headers_skip and hpath in task.env.public_headers_skip:
            outfile.write(line)
            continue

        # work out the header this refers to
        found = False
        for s in search_paths:
            p = os.path.normpath(os.path.join(s, hpath))
            if p in header_map:
                outfile.write("#include <%s>\n" % header_map[p])
                found = True
                break

        if found:
            continue

        if task.env.public_headers_allow_broken:
            Logs.warn("Broken public header include '%s' in '%s'" % (hpath, relsrc))
            outfile.write(line)
            continue

        # try to be nice to the developer by suggesting an alternative
        suggested = find_suggested_header(hpath)
        # NOTE(review): infile is left open on this error path (only
        # outfile is closed before the raise) — harmless for a build
        # abort, but worth confirming intent
        outfile.close()
        os.unlink(tgt)
        sys.stderr.write("%s:%u:Error: unable to resolve public header %s (maybe try one of %s)\n" % (
            os.path.relpath(src, os.getcwd()), linenumber, hpath, suggested))
        raise Utils.WafError("Unable to resolve header path '%s' in public header '%s' in directory %s" % (
            hpath, relsrc, task.env.RELPATH))
    infile.close()
    outfile.close()
def public_headers_simple(bld, public_headers, header_path=None, public_headers_install=True):
    '''install some headers - simple version, no munging needed

    Each header entry may be "srcname:installname"; otherwise the header
    is installed under its own basename in ${INCLUDEDIR}.
    '''
    if not public_headers_install:
        return
    for h in TO_LIST(public_headers):
        # NOTE(review): inst_path is computed but never used here — the
        # install always goes to ${INCLUDEDIR}; confirm whether
        # header_path was meant to influence destname
        inst_path = header_install_path(h, header_path)
        if h.find(':') != -1:
            s = h.split(":")
            h_name = s[0]
            inst_name = s[1]
        else:
            h_name = h
            inst_name = os.path.basename(h)
        bld.INSTALL_FILES('${INCLUDEDIR}', h_name, destname=inst_name)
def PUBLIC_HEADERS(bld, public_headers, header_path=None, public_headers_install=True):
    '''install some headers

    header_path may either be a string that is added to the INCLUDEDIR,
    or it can be a dictionary of wildcard patterns which map to destination
    directories relative to INCLUDEDIR

    When bld.env.build_public_headers is set, each header is run through
    create_public_header() (include rewriting) before installation;
    otherwise this falls back to public_headers_simple().
    '''
    bld.SET_BUILD_GROUP('final')

    if not bld.env.build_public_headers:
        # in this case no header munging needed. Used for tdb, talloc etc
        public_headers_simple(bld, public_headers, header_path=header_path,
                              public_headers_install=public_headers_install)
        return

    # create the public header in the given path
    # in the build tree
    for h in TO_LIST(public_headers):
        inst_path = header_install_path(h, header_path)
        # entries may be "srcname:installname"
        if h.find(':') != -1:
            s = h.split(":")
            h_name = s[0]
            inst_name = s[1]
        else:
            h_name = h
            inst_name = os.path.basename(h)

        relpath1 = os_path_relpath(bld.srcnode.abspath(), bld.curdir)
        relpath2 = os_path_relpath(bld.curdir, bld.srcnode.abspath())

        targetdir = os.path.normpath(os.path.join(relpath1, bld.env.build_public_headers, inst_path))
        if not os.path.exists(os.path.join(bld.curdir, targetdir)):
            raise Utils.WafError("missing source directory %s for public header %s" % (targetdir, inst_name))
        target = os.path.join(targetdir, inst_name)

        # the source path of the header, relative to the top of the source tree
        src_path = os.path.normpath(os.path.join(relpath2, h_name))

        # the install path of the header, relative to the public include directory
        target_path = os.path.normpath(os.path.join(inst_path, inst_name))

        # record the mapping so create_public_header can rewrite includes
        header_map[src_path] = target_path

        t = bld.SAMBA_GENERATOR('HEADER_%s/%s/%s' % (relpath2, inst_path, inst_name),
                                group='headers',
                                rule=create_public_header,
                                source=h_name,
                                target=target)
        t.env.RELPATH = relpath2
        t.env.TOPDIR = bld.srcnode.abspath()

        if not bld.env.public_headers_list:
            bld.env.public_headers_list = []
        bld.env.public_headers_list.append(os.path.join(inst_path, inst_name))

        if public_headers_install:
            bld.INSTALL_FILES('${INCLUDEDIR}',
                              target,
                              destname=os.path.join(inst_path, inst_name), flat=True)

# export as a method on waf's BuildContext
Build.BuildContext.PUBLIC_HEADERS = PUBLIC_HEADERS
ntdb-1.0/buildtools/wafsamba/samba_install.py 0000664 0000000 0000000 00000017467 12241515307 0021444 0 ustar 00root root 0000000 0000000 ###########################
# this handles the magic we need to do for installing
# with all the configure options that affect rpath and shared
# library use
import Options
from TaskGen import feature, before, after
from samba_utils import *
@feature('install_bin')
@after('apply_core')
@before('apply_link', 'apply_obj_vars')
def install_binary(self):
    '''install a binary, taking account of the different rpath variants

    Sets self.env.RPATH for build or install as appropriate; when the two
    rpath flag sets differ, the install copy is built under a '.inst'
    target name so the in-tree binary is not overwritten.
    '''
    bld = self.bld

    # get the ldflags we will use for install and build
    install_ldflags = install_rpath(self)
    build_ldflags = build_rpath(bld)

    if not Options.is_install:
        # just need to set rpath if we are not installing
        self.env.RPATH = build_ldflags
        return

    # work out the install path, expanding variables
    install_path = getattr(self, 'samba_inst_path', None) or '${BINDIR}'
    install_path = bld.EXPAND_VARIABLES(install_path)

    orig_target = os.path.basename(self.target)

    if install_ldflags != build_ldflags:
        # we will be creating a new target name, and using that for the
        # install link. That stops us from overwriting the existing build
        # target, which has different ldflags
        self.target += '.inst'

    # setup the right rpath link flags for the install
    self.env.RPATH = install_ldflags

    # default to installable, matching install_library()'s treatment of
    # a missing samba_install attribute (the bare attribute access would
    # raise AttributeError when the flag was never set)
    if not getattr(self, 'samba_install', True):
        # this binary is marked not to be installed
        return

    # tell waf to install the right binary
    bld.install_as(os.path.join(install_path, orig_target),
                   os.path.join(self.path.abspath(bld.env), self.target),
                   chmod=MODE_755)
@feature('install_lib')
@after('apply_core')
@before('apply_link', 'apply_obj_vars')
def install_library(self):
    '''install a library, taking account of the different rpath varients

    Computes the installed library name, the soname symlink and (for
    versioned public libraries) the development symlink, then registers
    the install/symlink actions with waf. When build and install rpath
    flags differ, a cloned '.inst' task generator builds the install copy.
    '''
    if getattr(self, 'done_install_library', False):
        # guard: the clone below re-posts this method
        return

    bld = self.bld

    install_ldflags = install_rpath(self)
    build_ldflags = build_rpath(bld)

    if not Options.is_install or not getattr(self, 'samba_install', True):
        # just need to set the build rpath if we are not installing
        self.env.RPATH = build_ldflags
        return

    # setup the install path, expanding variables
    install_path = getattr(self, 'samba_inst_path', None)
    if install_path is None:
        if getattr(self, 'private_library', False):
            install_path = '${PRIVATELIBDIR}'
        else:
            install_path = '${LIBDIR}'
    install_path = bld.EXPAND_VARIABLES(install_path)

    target_name = self.target

    if install_ldflags != build_ldflags:
        # we will be creating a new target name, and using that for the
        # install link. That stops us from overwriting the existing build
        # target, which has different ldflags
        self.done_install_library = True
        t = self.clone('default')
        t.posted = False
        t.target += '.inst'
        self.env.RPATH = build_ldflags
    else:
        t = self

    t.env.RPATH = install_ldflags

    dev_link = None

    # in the following the names are:
    # - inst_name is the name with .inst. in it, in the build
    #   directory
    # - install_name is the name in the install directory
    # - install_link is a symlink in the install directory, to install_name
    if getattr(self, 'samba_realname', None):
        install_name = self.samba_realname
        install_link = None
        if getattr(self, 'soname', ''):
            install_link = self.soname
        if getattr(self, 'samba_type', None) == 'PYTHON':
            inst_name = bld.make_libname(t.target, nolibprefix=True, python=True)
        else:
            inst_name = bld.make_libname(t.target)
    elif self.vnum:
        # versioned library: install libfoo.so.X.Y with a libfoo.so.X link
        vnum_base = self.vnum.split('.')[0]
        install_name = bld.make_libname(target_name, version=self.vnum)
        install_link = bld.make_libname(target_name, version=vnum_base)
        inst_name = bld.make_libname(t.target)
        if not self.private_library:
            # only generate the dev link for non-bundled libs
            dev_link = bld.make_libname(target_name)
    elif getattr(self, 'soname', ''):
        install_name = bld.make_libname(target_name)
        install_link = self.soname
        inst_name = bld.make_libname(t.target)
    else:
        install_name = bld.make_libname(target_name)
        install_link = None
        inst_name = bld.make_libname(t.target)

    if t.env.SONAME_ST:
        # ensure we get the right names in the library
        if install_link:
            t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_link)
        else:
            t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_name)
        t.env.SONAME_ST = ''

    # tell waf to install the library
    bld.install_as(os.path.join(install_path, install_name),
                   os.path.join(self.path.abspath(bld.env), inst_name),
                   chmod=MODE_755)
    if install_link and install_link != install_name:
        # and the symlink if needed
        bld.symlink_as(os.path.join(install_path, install_link), os.path.basename(install_name))
    if dev_link:
        bld.symlink_as(os.path.join(install_path, dev_link), os.path.basename(install_name))
@feature('cshlib')
@after('apply_implib')
@before('apply_vnum')
def apply_soname(self):
    '''add an explicit soname linker flag when one is configured on the
    task generator, then clear SONAME_ST so waf's default soname
    handling does not add a second one'''
    soname = getattr(self, 'soname', '')
    soname_fmt = self.env.SONAME_ST
    if soname_fmt and soname:
        self.env.append_value('LINKFLAGS', soname_fmt % soname)
        self.env.SONAME_ST = ''
@feature('cshlib')
@after('apply_implib')
@before('apply_vnum')
def apply_vscript(self):
    '''add version-script arguments to library build'''
    script = getattr(self, 'version_script', '')
    if self.env.HAVE_LD_VERSION_SCRIPT and script:
        self.env.append_value('LINKFLAGS',
                              "-Wl,--version-script=%s" % script)
        # consume the attribute so the script is only applied once
        self.version_script = None
##############################
# handle the creation of links for libraries and binaries in the build tree
@feature('symlink_lib')
@after('apply_link')
def symlink_lib(self):
    '''symlink a shared lib into the build-tree library directory
    (LIB_PATH, or LIB_PATH/private for private libraries)'''
    if self.target.endswith('.inst'):
        # install-only clones never get a build-tree link
        return

    blddir = os.path.dirname(self.bld.srcnode.abspath(self.bld.env))
    libpath = self.link_task.outputs[0].abspath(self.env)

    # calculate the link target and put it in the environment
    soext=""
    vnum = getattr(self, 'vnum', None)
    if vnum is not None:
        # use the major version as the link suffix (libfoo.so.X)
        soext = '.' + vnum.split('.')[0]

    link_target = getattr(self, 'link_name', '')
    if link_target == '':
        basename = os.path.basename(self.bld.make_libname(self.target, version=soext))
        if getattr(self, "private_library", False):
            link_target = '%s/private/%s' % (LIB_PATH, basename)
        else:
            link_target = '%s/%s' % (LIB_PATH, basename)

    link_target = os.path.join(blddir, link_target)

    if os.path.lexists(link_target):
        if os.path.islink(link_target) and os.readlink(link_target) == libpath:
            # already points at the right place
            return
        os.unlink(link_target)

    link_container = os.path.dirname(link_target)
    if not os.path.isdir(link_container):
        os.makedirs(link_container)

    os.symlink(libpath, link_target)
@feature('symlink_bin')
@after('apply_link')
def symlink_bin(self):
    '''symlink a binary into the build directory'''
    if self.target.endswith('.inst'):
        # install-only clones never get a build-tree link
        return

    blddir = os.path.dirname(self.bld.srcnode.abspath(self.bld.env))
    if not self.link_task.outputs or not self.link_task.outputs[0]:
        raise Utils.WafError('no outputs found for %s in symlink_bin' % self.name)
    binpath = self.link_task.outputs[0].abspath(self.env)
    bldpath = os.path.join(self.bld.env.BUILD_DIRECTORY, self.link_task.outputs[0].name)

    if os.path.lexists(bldpath):
        if os.path.islink(bldpath) and os.readlink(bldpath) == binpath:
            # already points at the right place
            return
        os.unlink(bldpath)
    os.symlink(binpath, bldpath)
ntdb-1.0/buildtools/wafsamba/samba_optimisation.py 0000664 0000000 0000000 00000011111 12241515307 0022471 0 ustar 00root root 0000000 0000000 # This file contains waf optimisations for Samba
# most of these optimisations are possible because of the restricted build environment
# that Samba has. For example, Samba doesn't attempt to cope with Win32 paths during the
# build, and Samba doesn't need build varients
# overall this makes some build tasks quite a bit faster
from TaskGen import feature, after
import preproc, Task
@feature('cc', 'cxx')
@after('apply_type_vars', 'apply_lib_vars', 'apply_core')
def apply_incpaths(self):
    '''optimised replacement for waf's apply_incpaths: resolves include
    paths to nodes, memoising directory lookups in a per-build cache
    (bld.kak) to avoid repeated find_dir() calls'''
    lst = []

    try:
        kak = self.bld.kak
    except AttributeError:
        kak = self.bld.kak = {}

    # TODO move the uselib processing out of here
    for lib in self.to_list(self.uselib):
        for path in self.env['CPPPATH_' + lib]:
            if not path in lst:
                lst.append(path)

    if preproc.go_absolute:
        for path in preproc.standard_includes:
            if not path in lst:
                lst.append(path)

    for path in self.to_list(self.includes):
        if not path in lst:
            # only resolve relative paths to nodes; absolute paths go
            # straight into CPPPATH (unless go_absolute is set)
            if preproc.go_absolute or path[0] != '/':  # os.path.isabs(path):
                lst.append(path)
            else:
                self.env.prepend_value('CPPPATH', path)

    for path in lst:
        node = None
        if path[0] == '/':  # os.path.isabs(path):
            if preproc.go_absolute:
                node = self.bld.root.find_dir(path)
        elif path[0] == '#':
            # '#' prefix means relative to the top of the source tree
            node = self.bld.srcnode
            if len(path) > 1:
                try:
                    node = kak[path]
                except KeyError:
                    kak[path] = node = node.find_dir(path[1:])
        else:
            # relative to this task generator's directory; cache key
            # includes the directory id to keep entries distinct
            try:
                node = kak[(self.path.id, path)]
            except KeyError:
                kak[(self.path.id, path)] = node = self.path.find_dir(path)

        if node:
            self.env.append_value('INC_PATHS', node)
@feature('cc')
@after('apply_incpaths')
def apply_obj_vars_cc(self):
    """after apply_incpaths for INC_PATHS

    Optimised replacement for waf's apply_obj_vars_cc: converts the
    INC_PATHS nodes into -I style flags, memoising the per-node flag
    pairs in a per-build cache (bld.cac).
    """
    env = self.env
    app = env.append_unique
    cpppath_st = env['CPPPATH_ST']
    lss = env['_CCINCFLAGS']

    try:
        cac = self.bld.cac
    except AttributeError:
        cac = self.bld.cac = {}

    # local flags come first
    # set the user-defined includes paths
    for i in env['INC_PATHS']:
        try:
            lss.extend(cac[i.id])
        except KeyError:
            # each include node contributes both its build and source path
            cac[i.id] = [cpppath_st % i.bldpath(env), cpppath_st % i.srcpath(env)]
            lss.extend(cac[i.id])

    env['_CCINCFLAGS'] = lss

    # set the library include paths
    for i in env['CPPPATH']:
        app('_CCINCFLAGS', cpppath_st % i)
import Node, Environment

# Samba does not use build variants, so hardwire waf's variant lookups
# to "default" (and 0 for file nodes) instead of the slower originals.
def vari(self):
    return "default"
Environment.Environment.variant = vari

def variant(self, env):
    if not env: return 0
    elif self.id & 3 == Node.FILE: return 0
    else: return "default"
Node.Node.variant = variant
import TaskGen, Task

# simplified replacement for TaskGen.task_gen.create_task: constructs the
# task class directly and sets inputs/outputs, skipping waf's extra
# bookkeeping for speed
def create_task(self, name, src=None, tgt=None):
    task = Task.TaskBase.classes[name](self.env, generator=self)
    if src:
        task.set_inputs(src)
    if tgt:
        task.set_outputs(tgt)
    return task
TaskGen.task_gen.create_task = create_task
# faster replacement for Task.TaskBase.hash_constraints: hashes the
# ordering-constraint attributes (before/after/ext_in/ext_out) plus the
# class maxjobs in one tuple
def hash_constraints(self):
    a = self.attr
    sum = hash((str(a('before', '')),
                str(a('after', '')),
                str(a('ext_in', '')),
                str(a('ext_out', '')),
                self.__class__.maxjobs))
    return sum
Task.TaskBase.hash_constraints = hash_constraints
# import cc
# from TaskGen import extension
# import Utils
# @extension(cc.EXT_CC)
# def c_hook(self, node):
# task = self.create_task('cc', node, node.change_ext('.o'))
# try:
# self.compiled_tasks.append(task)
# except AttributeError:
# raise Utils.WafError('Have you forgotten to set the feature "cc" on %s?' % str(self))
# bld = self.bld
# try:
# dc = bld.dc
# except AttributeError:
# dc = bld.dc = {}
# if task.outputs[0].id in dc:
# raise Utils.WafError('Samba, you are doing it wrong %r %s %s' % (task.outputs, task.generator, dc[task.outputs[0].id].generator))
# else:
# dc[task.outputs[0].id] = task
# return task
def suncc_wrap(cls):
    '''work around a problem with cc on solaris not handling module aliases
    which have empty libs'''
    # only patch a given task class once
    if getattr(cls, 'solaris_wrap', False):
        return
    cls.solaris_wrap = True
    original_run = cls.run

    def patched_run(self):
        if self.env.CC_NAME == "sun" and not self.inputs:
            # use a private copy of the environment so the extra flag
            # does not leak into other tasks
            self.env = self.env.copy()
            self.env.append_value('LINKFLAGS', '-')
        return original_run(self)

    cls.run = patched_run
# apply the solaris workaround to the shared-library link task class
suncc_wrap(Task.TaskBase.classes['cc_link'])
ntdb-1.0/buildtools/wafsamba/samba_patterns.py 0000664 0000000 0000000 00000023657 12241515307 0021634 0 ustar 00root root 0000000 0000000 # a waf tool to add extension based build patterns for Samba
import Task
from TaskGen import extension
from samba_utils import *
from wafsamba import samba_version_file
def write_version_header(task):
    '''print version.h contents

    waf task rule: renders the VERSION input file through
    samba_version_file() and writes the result to the task's output.
    Returns 0 for success, as waf task rules expect.
    '''
    src = task.inputs[0].srcpath(task.env)
    tgt = task.outputs[0].bldpath(task.env)

    version = samba_version_file(src, task.env.srcdir, env=task.env, is_install=task.env.is_install)
    string = str(version)

    f = open(tgt, 'w')
    # NOTE(review): 's' captures write()'s return but is never used
    s = f.write(string)
    f.close()
    return 0
def SAMBA_MKVERSION(bld, target):
    '''generate the version.h header for Samba'''

    # We only force waf to re-generate this file if we are installing,
    # because only then is information not included in the deps (the
    # git revision) included in the version.
    t = bld.SAMBA_GENERATOR('VERSION',
                            rule=write_version_header,
                            source= 'VERSION',
                            target=target,
                            always=bld.is_install)
    # pass the install flag through to the task rule via its env
    t.env.is_install = bld.is_install
# export as a method on waf's BuildContext
Build.BuildContext.SAMBA_MKVERSION = SAMBA_MKVERSION
def write_build_options_header(fp):
    '''write preamble for build_options.c

    Emits the license header, the static output() helper and the opening
    of build_options(), including the build-environment and path report
    sections. Paired with write_build_options_footer().
    '''
    # license and file header
    fp.write("/*\n")
    fp.write(" Unix SMB/CIFS implementation.\n")
    fp.write(" Build Options for Samba Suite\n")
    fp.write(" Copyright (C) Vance Lankhaar 2003\n")
    fp.write(" Copyright (C) Andrew Bartlett 2001\n")
    fp.write("\n")
    fp.write(" This program is free software; you can redistribute it and/or modify\n")
    fp.write(" it under the terms of the GNU General Public License as published by\n")
    fp.write(" the Free Software Foundation; either version 3 of the License, or\n")
    fp.write(" (at your option) any later version.\n")
    fp.write("\n")
    fp.write(" This program is distributed in the hope that it will be useful,\n")
    fp.write(" but WITHOUT ANY WARRANTY; without even the implied warranty of\n")
    fp.write(" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n")
    fp.write(" GNU General Public License for more details.\n")
    fp.write("\n")
    fp.write(" You should have received a copy of the GNU General Public License\n")
    fp.write(" along with this program; if not, see .\n")
    fp.write("*/\n")
    fp.write("\n")
    fp.write("#include \"includes.h\"\n")
    fp.write("#include \"build_env.h\"\n")
    fp.write("#include \"dynconfig/dynconfig.h\"\n")
    fp.write("\n")
    fp.write("static int output(bool screen, const char *format, ...) PRINTF_ATTRIBUTE(2,3);\n")
    fp.write("void build_options(bool screen);\n")
    fp.write("\n")
    fp.write("\n")
    # the output() helper: printf to screen or DEBUG log
    fp.write("/****************************************************************************\n")
    fp.write("helper function for build_options\n")
    fp.write("****************************************************************************/\n")
    fp.write("static int output(bool screen, const char *format, ...)\n")
    fp.write("{\n")
    fp.write(" char *ptr = NULL;\n")
    fp.write(" int ret = 0;\n")
    fp.write(" va_list ap;\n")
    fp.write(" \n")
    fp.write(" va_start(ap, format);\n")
    fp.write(" ret = vasprintf(&ptr,format,ap);\n")
    fp.write(" va_end(ap);\n")
    fp.write("\n")
    fp.write(" if (screen) {\n")
    fp.write(" d_printf(\"%s\", ptr ? ptr : \"\");\n")
    fp.write(" } else {\n")
    fp.write(" DEBUG(4,(\"%s\", ptr ? ptr : \"\"));\n")
    fp.write(" }\n")
    fp.write(" \n")
    fp.write(" SAFE_FREE(ptr);\n")
    fp.write(" return ret;\n")
    fp.write("}\n")
    fp.write("\n")
    # opening of build_options() itself
    fp.write("/****************************************************************************\n")
    fp.write("options set at build time for the samba suite\n")
    fp.write("****************************************************************************/\n")
    fp.write("void build_options(bool screen)\n")
    fp.write("{\n")
    fp.write(" if ((DEBUGLEVEL < 4) && (!screen)) {\n")
    fp.write(" return;\n")
    fp.write(" }\n")
    fp.write("\n")
    fp.write("#ifdef _BUILD_ENV_H\n")
    fp.write(" /* Output information about the build environment */\n")
    fp.write(" output(screen,\"Build environment:\\n\");\n")
    fp.write(" output(screen,\" Built by: %s@%s\\n\",BUILD_ENV_USER,BUILD_ENV_HOST);\n")
    fp.write(" output(screen,\" Built on: %s\\n\",BUILD_ENV_DATE);\n")
    fp.write("\n")
    fp.write(" output(screen,\" Built using: %s\\n\",BUILD_ENV_COMPILER);\n")
    fp.write(" output(screen,\" Build host: %s\\n\",BUILD_ENV_UNAME);\n")
    fp.write(" output(screen,\" SRCDIR: %s\\n\",BUILD_ENV_SRCDIR);\n")
    fp.write(" output(screen,\" BUILDDIR: %s\\n\",BUILD_ENV_BUILDDIR);\n")
    fp.write("\n")
    fp.write("\n")
    fp.write("#endif\n")
    fp.write("\n")
    fp.write(" /* Output various paths to files and directories */\n")
    fp.write(" output(screen,\"\\nPaths:\\n\");\n")
    fp.write(" output(screen,\" SBINDIR: %s\\n\", get_dyn_SBINDIR());\n")
    fp.write(" output(screen,\" BINDIR: %s\\n\", get_dyn_BINDIR());\n")
    fp.write(" output(screen,\" CONFIGFILE: %s\\n\", get_dyn_CONFIGFILE());\n")
    fp.write(" output(screen,\" LOGFILEBASE: %s\\n\", get_dyn_LOGFILEBASE());\n")
    fp.write(" output(screen,\" LMHOSTSFILE: %s\\n\",get_dyn_LMHOSTSFILE());\n")
    fp.write(" output(screen,\" LIBDIR: %s\\n\",get_dyn_LIBDIR());\n")
    fp.write(" output(screen,\" MODULESDIR: %s\\n\",get_dyn_MODULESDIR());\n")
    fp.write(" output(screen,\" SHLIBEXT: %s\\n\",get_dyn_SHLIBEXT());\n")
    fp.write(" output(screen,\" LOCKDIR: %s\\n\",get_dyn_LOCKDIR());\n")
    fp.write(" output(screen,\" STATEDIR: %s\\n\",get_dyn_STATEDIR());\n")
    fp.write(" output(screen,\" CACHEDIR: %s\\n\",get_dyn_CACHEDIR());\n")
    fp.write(" output(screen,\" PIDDIR: %s\\n\", get_dyn_PIDDIR());\n")
    fp.write(" output(screen,\" SMB_PASSWD_FILE: %s\\n\",get_dyn_SMB_PASSWD_FILE());\n")
    fp.write(" output(screen,\" PRIVATE_DIR: %s\\n\",get_dyn_PRIVATE_DIR());\n")
    fp.write("\n")
def write_build_options_footer(fp):
    '''write the closing part of build_options.c: the type-size report,
    the builtin-modules list and the closing brace of build_options()'''
    fp.write(" /* Output the sizes of the various types */\n")
    fp.write(" output(screen, \"\\nType sizes:\\n\");\n")
    fp.write(" output(screen, \" sizeof(char): %lu\\n\",(unsigned long)sizeof(char));\n")
    fp.write(" output(screen, \" sizeof(int): %lu\\n\",(unsigned long)sizeof(int));\n")
    fp.write(" output(screen, \" sizeof(long): %lu\\n\",(unsigned long)sizeof(long));\n")
    fp.write("#if HAVE_LONGLONG\n")
    fp.write(" output(screen, \" sizeof(long long): %lu\\n\",(unsigned long)sizeof(long long));\n")
    fp.write("#endif\n")
    fp.write(" output(screen, \" sizeof(uint8): %lu\\n\",(unsigned long)sizeof(uint8));\n")
    fp.write(" output(screen, \" sizeof(uint16): %lu\\n\",(unsigned long)sizeof(uint16));\n")
    fp.write(" output(screen, \" sizeof(uint32): %lu\\n\",(unsigned long)sizeof(uint32));\n")
    fp.write(" output(screen, \" sizeof(short): %lu\\n\",(unsigned long)sizeof(short));\n")
    fp.write(" output(screen, \" sizeof(void*): %lu\\n\",(unsigned long)sizeof(void*));\n")
    fp.write(" output(screen, \" sizeof(size_t): %lu\\n\",(unsigned long)sizeof(size_t));\n")
    fp.write(" output(screen, \" sizeof(off_t): %lu\\n\",(unsigned long)sizeof(off_t));\n")
    fp.write(" output(screen, \" sizeof(ino_t): %lu\\n\",(unsigned long)sizeof(ino_t));\n")
    fp.write(" output(screen, \" sizeof(dev_t): %lu\\n\",(unsigned long)sizeof(dev_t));\n")
    fp.write("\n")
    fp.write(" output(screen, \"\\nBuiltin modules:\\n\");\n")
    fp.write(" output(screen, \" %s\\n\", STRING_STATIC_MODULES);\n")
    fp.write("}\n")
def write_build_options_section(fp, keys, section):
    '''Write one section of the build-options report: a heading plus one
    #ifdef-guarded output line per key, keys emitted in sorted order.'''
    fp.write("\n\t/* Show %s */\n" % section)
    fp.write(' output(screen, "\\n%s:\\n");\n\n' % section)

    chunks = []
    for key in sorted(keys):
        chunks.append("#ifdef %s\n" % key)
        chunks.append(' output(screen, " %s\\n");\n' % key)
        chunks.append("#endif\n")
    fp.write("".join(chunks))
    fp.write("\n")
def write_build_options(task):
    '''Task rule body: classify the configured defines into sections and
    generate the build-options C source file for this task's output.'''
    defines = task.env['defines']

    # buckets for the report sections
    with_keys = []
    utmp_keys = []
    have_keys = []
    sys_hdr_keys = []
    other_hdr_keys = []
    misc_keys = []

    for key in defines:
        # order matters: UTMP-related defines win over the other prefixes
        if key.startswith("HAVE_UT_UT_") or key.find("UTMP") >= 0:
            utmp_keys.append(key)
        elif key.startswith("WITH_"):
            with_keys.append(key)
        elif key.startswith("HAVE_SYS_"):
            sys_hdr_keys.append(key)
        elif key.startswith("HAVE_"):
            if key.endswith("_H"):
                other_hdr_keys.append(key)
            else:
                have_keys.append(key)
        else:
            misc_keys.append(key)

    tgt = task.outputs[0].bldpath(task.env)
    f = open(tgt, 'w')
    write_build_options_header(f)
    write_build_options_section(f, sys_hdr_keys, "System Headers")
    write_build_options_section(f, other_hdr_keys, "Headers")
    write_build_options_section(f, utmp_keys, "UTMP Options")
    write_build_options_section(f, have_keys, "HAVE_* Defines")
    write_build_options_section(f, with_keys, "--with Options")
    write_build_options_section(f, misc_keys, "Build Options")
    write_build_options_footer(f)
    f.close()
    return 0
def SAMBA_BLDOPTIONS(bld, target):
    '''generate the bld_options.c for Samba'''
    # always=True: regenerate on every build, since the defines can change
    # NOTE(review): 't' is unused; kept for symmetry with other wrappers
    t = bld.SAMBA_GENERATOR(target,
                            rule=write_build_options,
                            target=target,
                            always=True)
Build.BuildContext.SAMBA_BLDOPTIONS = SAMBA_BLDOPTIONS
ntdb-1.0/buildtools/wafsamba/samba_pidl.py 0000664 0000000 0000000 00000012553 12241515307 0020715 0 ustar 00root root 0000000 0000000 # waf build tool for building IDL files with pidl
from TaskGen import before
import Build, os, sys, Logs
from samba_utils import *
def SAMBA_PIDL(bld, pname, source,
               options='',
               output_dir='.',
               generate_tables=True):
    '''Build a IDL file using pidl.
       This will produce up to 13 output files depending on the options used'''

    bname = source[0:-4]; # strip off the .idl suffix
    bname = os.path.basename(bname)
    name = "%s_%s" % (pname, bname.upper())

    if not SET_TARGET_TYPE(bld, name, 'PIDL'):
        return

    bld.SET_BUILD_GROUP('build_source')

    # the output files depend on the options used. Use this dictionary
    # to map between the options and the resulting file names
    options_map = { '--header'            : '%s.h',
                    '--ndr-parser'        : 'ndr_%s.c ndr_%s.h',
                    '--samba3-ndr-server' : 'srv_%s.c srv_%s.h',
                    '--samba3-ndr-client' : 'cli_%s.c cli_%s.h',
                    '--server'            : 'ndr_%s_s.c',
                    '--client'            : 'ndr_%s_c.c ndr_%s_c.h',
                    '--python'            : 'py_%s.c',
                    '--tdr-parser'        : 'tdr_%s.c tdr_%s.h',
                    '--dcom-proxy'        : '%s_p.c',
                    '--com-header'        : 'com_%s.h'
                    }

    table_header_idx = None
    out_files = []
    options_list = TO_LIST(options)

    for o in options_list:
        if o in options_map:
            ofiles = TO_LIST(options_map[o])
            for f in ofiles:
                out_files.append(os.path.join(output_dir, f % bname))
                if f == 'ndr_%s.h':
                    # remember this one for the tables generation
                    table_header_idx = len(out_files) - 1

    # depend on the full pidl sources (cached so the glob runs only once)
    source = TO_LIST(source)
    try:
        pidl_src_nodes = bld.pidl_files_cache
    except AttributeError:
        bld.pidl_files_cache = bld.srcnode.ant_glob('pidl/lib/Parse/**/*.pm', flat=False)
        bld.pidl_files_cache.extend(bld.srcnode.ant_glob('pidl', flat=False))
        pidl_src_nodes = bld.pidl_files_cache

    # the cd .. is needed because pidl currently is sensitive to the directory it is run in
    cpp = ""
    cc = ""
    if bld.CONFIG_SET("CPP") and bld.CONFIG_GET("CPP") != "":
        if isinstance(bld.CONFIG_GET("CPP"), list):
            cpp = 'CPP="%s"' % " ".join(bld.CONFIG_GET("CPP"))
        else:
            cpp = 'CPP="%s"' % bld.CONFIG_GET("CPP")

    if cpp == "CPP=xlc_r":
        # NOTE(review): xlc_r (AIX) is explicitly rejected as a
        # preprocessor for pidl — presumably unusable in that role; confirm
        cpp = ""

    if bld.CONFIG_SET("CC"):
        if isinstance(bld.CONFIG_GET("CC"), list):
            cc = 'CC="%s"' % " ".join(bld.CONFIG_GET("CC"))
        else:
            cc = 'CC="%s"' % bld.CONFIG_GET("CC")

    t = bld(rule='cd .. && %s %s ${PERL} "${PIDL}" --quiet ${OPTIONS} --outputdir ${OUTPUTDIR} -- "${SRC[0].abspath(env)}"' % (cpp, cc),
            ext_out    = '.c',
            before     = 'cc',
            on_results = True,
            shell      = True,
            source     = source,
            target     = out_files,
            name       = name,
            samba_type = 'PIDL')

    # prime the list of nodes we are dependent on with the cached pidl sources
    t.allnodes = pidl_src_nodes

    t.env.PIDL = os.path.join(bld.srcnode.abspath(), 'pidl/pidl')
    t.env.OPTIONS = TO_LIST(options)
    t.env.OUTPUTDIR = bld.bldnode.name + '/' + bld.path.find_dir(output_dir).bldpath(t.env)

    if generate_tables and table_header_idx is not None:
        # publish the ndr_*.h header so SAMBA_PIDL_TABLES can collect it
        pidl_headers = LOCAL_CACHE(bld, 'PIDL_HEADERS')
        pidl_headers[name] = [bld.path.find_or_declare(out_files[table_header_idx])]

    t.more_includes = '#' + bld.path.relpath_gen(bld.srcnode)
Build.BuildContext.SAMBA_PIDL = SAMBA_PIDL
def SAMBA_PIDL_LIST(bld, name, source,
                    options='',
                    output_dir='.',
                    generate_tables=True):
    '''A wrapper for building a set of IDL files'''
    # one SAMBA_PIDL target per IDL file, all sharing the same options
    for p in TO_LIST(source):
        bld.SAMBA_PIDL(name, p, options=options, output_dir=output_dir, generate_tables=generate_tables)
Build.BuildContext.SAMBA_PIDL_LIST = SAMBA_PIDL_LIST
#################################################################
# the rule for generating the NDR tables
from TaskGen import feature, before
@feature('collect')
@before('exec_rule')
def collect(self):
    '''gather all PIDL-generated NDR table headers (registered in the
    PIDL_HEADERS local cache) into this task generator's source list'''
    pidl_headers = LOCAL_CACHE(self.bld, 'PIDL_HEADERS')
    for (name, hd) in pidl_headers.items():
        y = self.bld.name_to_obj(name, self.env)
        self.bld.ASSERT(y is not None, 'Failed to find PIDL header %s' % name)
        # force the PIDL task generator to be posted so its outputs exist
        y.post()
        for node in hd:
            self.bld.ASSERT(node is not None, 'Got None as build node generating PIDL table for %s' % name)
            self.source += " " + node.relpath_gen(self.path)
def SAMBA_PIDL_TABLES(bld, name, target):
    '''generate the pidl NDR tables file'''
    # NOTE(review): 'headers' is unused; the 'collect' feature pulls the
    # headers from the PIDL_HEADERS cache instead — confirm it can be dropped
    headers = bld.env.PIDL_HEADERS
    bld.SET_BUILD_GROUP('main')
    t = bld(
            features = 'collect',
            # NOTE(review): ${TGT} is both passed to tables.pl via --output
            # and used as the redirection target of the sed pipeline —
            # confirm this double write is intended
            rule     = '${PERL} ${SRC} --output ${TGT} | sed "s|default/||" > ${TGT}',
            ext_out  = '.c',
            before   = 'cc',
            on_results = True,
            shell    = True,
            source   = '../../librpc/tables.pl',
            target   = target,
            name     = name)
    t.env.LIBRPC = os.path.join(bld.srcnode.abspath(), 'librpc')
Build.BuildContext.SAMBA_PIDL_TABLES = SAMBA_PIDL_TABLES
ntdb-1.0/buildtools/wafsamba/samba_python.py 0000664 0000000 0000000 00000004277 12241515307 0021312 0 ustar 00root root 0000000 0000000 # waf build tool for building IDL files with pidl
import Build
from samba_utils import *
from samba_autoconf import *
from Configure import conf
@conf
def SAMBA_CHECK_PYTHON(conf, mandatory=True, version=(2,4,2)):
    '''locate a python interpreter and verify its minimum version'''
    # enable tool to build python extensions
    conf.find_program('python', var='PYTHON', mandatory=mandatory)
    conf.check_tool('python')
    path_python = conf.find_program('python')
    # record whether the user pointed us at a specific interpreter
    conf.env.PYTHON_SPECIFIED = (conf.env.PYTHON != path_python)
    conf.check_python_version(version)
@conf
def SAMBA_CHECK_PYTHON_HEADERS(conf, mandatory=True):
    '''check for Python.h etc, caching the result across configure runs'''
    if conf.env["python_headers_checked"] == []:
        conf.check_python_headers(mandatory)
        # any non-empty value marks the check as done
        conf.env["python_headers_checked"] = "yes"
    else:
        conf.msg("python headers", "using cache")
def SAMBA_PYTHON(bld, name,
                 source='',
                 deps='',
                 public_deps='',
                 realname=None,
                 cflags='',
                 includes='',
                 init_function_sentinel=None,
                 local_include=True,
                 vars=None,
                 enabled=True):
    '''build a python extension for Samba

    Thin wrapper around SAMBA_LIBRARY that sets the flags needed for a
    python module (pyext, PYTHON target type, install to PYTHONARCHDIR).
    '''

    # when we support static python modules we'll need to gather
    # the list from all the SAMBA_PYTHON() targets
    if init_function_sentinel is not None:
        # bugfix: a separating space is required, otherwise the define is
        # glued onto the last flag already present in cflags
        cflags += ' -DSTATIC_LIBPYTHON_MODULES=%s' % init_function_sentinel

    source = bld.EXPAND_VARIABLES(source, vars=vars)

    if realname is not None:
        # stage the built module under python_modules/ with its public name
        link_name = 'python_modules/%s' % realname
    else:
        link_name = None

    bld.SAMBA_LIBRARY(name,
                      source=source,
                      deps=deps,
                      public_deps=public_deps,
                      includes=includes,
                      cflags=cflags,
                      local_include=local_include,
                      vars=vars,
                      realname=realname,
                      link_name=link_name,
                      pyext=True,
                      target_type='PYTHON',
                      install_path='${PYTHONARCHDIR}',
                      allow_undefined_symbols=True,
                      enabled=enabled)
Build.BuildContext.SAMBA_PYTHON = SAMBA_PYTHON
ntdb-1.0/buildtools/wafsamba/samba_utils.py 0000664 0000000 0000000 00000051502 12241515307 0021122 0 ustar 00root root 0000000 0000000 # a waf tool to add autoconf-like macros to the configure section
# and for SAMBA_ macros for building libraries, binaries etc
import Build, os, sys, Options, Utils, Task, re, fnmatch, Logs
from TaskGen import feature, before
from Configure import conf, ConfigurationContext
from Logs import debug
import shlex
# TODO: make this a --option
LIB_PATH="shared"  # relative directory where in-tree shared libraries are staged

# sigh, python octal constants are a mess
# (0644 is py2-only syntax, 0o644 is py3-only, so parse from a string)
MODE_644 = int('644', 8)
MODE_755 = int('755', 8)
@conf
def SET_TARGET_TYPE(ctx, target, value):
    '''set the target type of a target'''
    cache = LOCAL_CACHE(ctx, 'TARGET_TYPE')
    # re-defining a target (unless it was only 'EMPTY') is a fatal
    # wscript authoring error
    if target in cache and cache[target] != 'EMPTY':
        Logs.error("ERROR: Target '%s' in directory %s re-defined as %s - was %s" % (target, ctx.curdir, value, cache[target]))
        sys.exit(1)
    LOCAL_CACHE_SET(ctx, 'TARGET_TYPE', target, value)
    debug("task_gen: Target '%s' created of type '%s' in %s" % (target, value, ctx.curdir))
    return True

def GET_TARGET_TYPE(ctx, target):
    '''get target type from cache, or None if the target is unknown'''
    cache = LOCAL_CACHE(ctx, 'TARGET_TYPE')
    if not target in cache:
        return None
    return cache[target]
######################################################
# this is used as a decorator to make functions only
# run once. Based on the idea from
# http://stackoverflow.com/questions/815110/is-there-a-decorator-to-simply-cache-function-return-values
runonce_ret = {}

def runonce(function):
    '''decorator: call the wrapped function at most once per distinct
    argument tuple, returning the cached result afterwards'''
    def runonce_wrapper(*args):
        try:
            return runonce_ret[args]
        except KeyError:
            result = function(*args)
            runonce_ret[args] = result
            return result
    return runonce_wrapper
def ADD_LD_LIBRARY_PATH(path):
    '''add a directory to LD_LIBRARY_PATH, if not already present'''
    if 'LD_LIBRARY_PATH' in os.environ:
        oldpath = os.environ['LD_LIBRARY_PATH']
    else:
        oldpath = ''
    # bugfix: ''.split(':') yields [''], which used to produce a leading
    # empty entry (":<path>"); an empty LD_LIBRARY_PATH element means the
    # current directory, which was never intended
    if oldpath:
        newpath = oldpath.split(':')
    else:
        newpath = []
    if not path in newpath:
        newpath.append(path)
        os.environ['LD_LIBRARY_PATH'] = ':'.join(newpath)
def needs_private_lib(bld, target):
    '''return True if any of target's final_libs resolves to a task
    generator marked as a private library'''
    for libname in getattr(target, "final_libs", []):
        obj = bld.name_to_obj(libname, bld.env)
        if obj is not None and getattr(obj, 'private_library', False):
            return True
    return False
def install_rpath(target):
    '''the rpath value for installation'''
    bld = target.bld
    # clear any build-time rpath; the install rpath is recomputed from config
    bld.env['RPATH'] = []
    ret = set()
    if bld.env.RPATH_ON_INSTALL:
        ret.add(bld.EXPAND_VARIABLES(bld.env.LIBDIR))
    # private libraries live in their own directory and need an extra entry
    if bld.env.RPATH_ON_INSTALL_PRIVATE and needs_private_lib(bld, target):
        ret.add(bld.EXPAND_VARIABLES(bld.env.PRIVATELIBDIR))
    return list(ret)

def build_rpath(bld):
    '''the rpath value for build'''
    rpaths = [os.path.normpath('%s/%s' % (bld.env.BUILD_DIRECTORY, d)) for d in ("shared", "shared/private")]
    bld.env['RPATH'] = []
    if bld.env.RPATH_ON_BUILD:
        return rpaths
    # rpath disabled for the build: fall back to LD_LIBRARY_PATH so
    # freshly-built binaries can still find the in-tree shared libraries
    for rpath in rpaths:
        ADD_LD_LIBRARY_PATH(rpath)
    return []
@conf
def LOCAL_CACHE(ctx, name):
    '''return a named build cache dictionary, used to store
       state inside other functions'''
    if name in ctx.env:
        return ctx.env[name]
    ctx.env[name] = {}
    return ctx.env[name]

@conf
def LOCAL_CACHE_SET(ctx, cachename, key, value):
    '''set a value in a local cache'''
    cache = LOCAL_CACHE(ctx, cachename)
    cache[key] = value

@conf
def ASSERT(ctx, expression, msg):
    '''a build assert call: raises WafError with msg when expression is false'''
    if not expression:
        raise Utils.WafError("ERROR: %s\n" % msg)
Build.BuildContext.ASSERT = ASSERT

def SUBDIR(bld, subdir, list):
    '''create a list of files by pre-pending each with a subdir name
       (returned as one space-separated string, with a trailing space)'''
    ret = ''
    for l in TO_LIST(list):
        ret = ret + os.path.normpath(os.path.join(subdir, l)) + ' '
    return ret
Build.BuildContext.SUBDIR = SUBDIR
def dict_concat(d1, d2):
    '''merge d2 into d1 (d1 += d2) without overwriting keys already in d1'''
    for key in d2:
        d1.setdefault(key, d2[key])
def exec_command(self, cmd, **kw):
    '''this overrides the 'waf -v' debug output to be in a nice
    unix like format instead of a python list.
    Thanks to ita on #waf for this'''
    import Utils, Logs
    _cmd = cmd
    if isinstance(cmd, list):
        _cmd = ' '.join(cmd)
    debug('runner: %s' % _cmd)
    if self.log:
        self.log.write('%s\n' % cmd)
        kw['log'] = self.log
    try:
        if not kw.get('cwd', None):
            kw['cwd'] = self.cwd
    except AttributeError:
        # first call: remember the build directory as the default cwd
        self.cwd = kw['cwd'] = self.bldnode.abspath()
    return Utils.exec_command(cmd, **kw)
Build.BuildContext.exec_command = exec_command

def ADD_COMMAND(opt, name, function):
    '''add a new top level command to waf'''
    Utils.g_module.__dict__[name] = function
    # NOTE(review): this sets the literal attribute 'name' on opt, not the
    # attribute named by the parameter — presumably setattr(opt, name,
    # function) was intended; confirm against waf Options.Handler usage
    opt.name = function
Options.Handler.ADD_COMMAND = ADD_COMMAND
@feature('cc', 'cshlib', 'cprogram')
@before('apply_core','exec_rule')
def process_depends_on(self):
    '''The new depends_on attribute for build rules
       allow us to specify a dependency on output from
       a source generation rule'''
    if getattr(self , 'depends_on', None):
        lst = self.to_list(self.depends_on)
        for x in lst:
            y = self.bld.name_to_obj(x, self.env)
            self.bld.ASSERT(y is not None, "Failed to find dependency %s of %s" % (x, self.name))
            # post the dependency first so its outputs exist before we build
            y.post()
            # inherit any extra include paths exported by the dependency
            if getattr(y, 'more_includes', None):
                self.includes += " " + y.more_includes
# use the stdlib os.path.relpath when available (Python >= 2.6)
os_path_relpath = getattr(os.path, 'relpath', None)
if os_path_relpath is None:
    # Python < 2.6 does not have os.path.relpath, provide a replacement
    # (imported from Python2.6.5~rc2)
    def os_path_relpath(path, start):
        """Return a relative version of a path"""
        start_list = os.path.abspath(start).split("/")
        path_list = os.path.abspath(path).split("/")
        # Work out how much of the filepath is shared by start and path.
        i = len(os.path.commonprefix([start_list, path_list]))
        rel_list = ['..'] * (len(start_list)-i) + path_list[i:]
        if not rel_list:
            return start
        return os.path.join(*rel_list)
def unique_list(seq):
    '''return seq with duplicates removed, keeping first-occurrence order'''
    seen = set()
    out = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out
def TO_LIST(str, delimiter=None):
    '''Split a list, preserving quoted strings and existing lists

    Accepts None (returns []), an existing list (returned unchanged) or a
    string.  If any element contains a double quote, the slower shlex
    split is used so that quoted substrings are kept together.
    '''
    if str is None:
        return []
    if isinstance(str, list):
        return str
    if len(str) == 0:
        return []
    lst = str.split(delimiter)
    # the string may have had quotes in it, now we
    # check if we did have quotes, and use the slower shlex
    # if we need to.
    # bugfix: use startswith() rather than e[0] so that empty elements
    # (e.g. produced by a doubled delimiter) do not raise IndexError
    for e in lst:
        if e.startswith('"'):
            return shlex.split(str)
    return lst
def subst_vars_error(string, env):
    '''substitute ${VAR} occurrences from env, raising KeyError if a
    variable is not defined'''
    # raw strings: avoids invalid-escape-sequence warnings on newer Pythons
    lst = re.split(r'(\$\{\w+\})', string)
    out = []
    for v in lst:
        if re.match(r'\$\{\w+\}', v):
            vname = v[2:-1]
            if not vname in env:
                raise KeyError("Failed to find variable %s in %s" % (vname, string))
            # NOTE(review): env values are assumed to be strings here;
            # a non-string value would make the final join fail
            v = env[vname]
        out.append(v)
    return ''.join(out)
@conf
def SUBST_ENV_VAR(ctx, varname):
    '''Substitute an environment variable for any embedded variables'''
    # raises KeyError if a referenced variable is undefined
    return subst_vars_error(ctx.env[varname], ctx.env)
Build.BuildContext.SUBST_ENV_VAR = SUBST_ENV_VAR
def ENFORCE_GROUP_ORDERING(bld):
    '''enforce group ordering for the project. This
    makes the group ordering apply only when you specify
    a target with --target'''
    if Options.options.compile_targets:
        @feature('*')
        @before('exec_rule', 'apply_core', 'collect')
        def force_previous_groups(self):
            # only run the forcing logic once per build
            if getattr(self.bld, 'enforced_group_ordering', False):
                return
            self.bld.enforced_group_ordering = True

            def group_name(g):
                # reverse-map a group object to its registered name
                tm = self.bld.task_manager
                return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0]

            my_id = id(self)
            bld = self.bld
            stop = None
            # find the group containing this task generator
            for g in bld.task_manager.groups:
                for t in g.tasks_gen:
                    if id(t) == my_id:
                        stop = id(g)
                        debug('group: Forcing up to group %s for target %s',
                              group_name(g), self.name or self.target)
                        break
                if stop is not None:
                    break
            if stop is None:
                return
            # post every task generator in all groups preceding ours
            for i in xrange(len(bld.task_manager.groups)):
                g = bld.task_manager.groups[i]
                bld.task_manager.current_group = i
                if id(g) == stop:
                    break
                debug('group: Forcing group %s', group_name(g))
                for t in g.tasks_gen:
                    if not getattr(t, 'forced_groups', False):
                        debug('group: Posting %s', t.name or t.target)
                        t.forced_groups = True
                        t.post()
Build.BuildContext.ENFORCE_GROUP_ORDERING = ENFORCE_GROUP_ORDERING
def recursive_dirlist(dir, relbase, pattern=None):
    '''recursive directory list, returning file paths relative to relbase

    pattern, when given, is an fnmatch glob applied to each file name.
    '''
    ret = []
    for f in os.listdir(dir):
        f2 = dir + '/' + f
        if os.path.isdir(f2):
            # bugfix: propagate pattern, otherwise filtering silently
            # stopped applying below the top-level directory
            ret.extend(recursive_dirlist(f2, relbase, pattern=pattern))
        else:
            if pattern and not fnmatch.fnmatch(f, pattern):
                continue
            ret.append(os_path_relpath(f2, relbase))
    return ret
def mkdir_p(dir):
    '''like mkdir -p: create dir and any missing parent directories'''
    if not dir:
        return
    # strip trailing slashes, as the original slash-at-a-time recursion did
    while dir.endswith("/"):
        dir = dir[:-1]
        if not dir:
            return
    if os.path.isdir(dir):
        return
    parent = os.path.dirname(dir)
    if parent != dir:
        mkdir_p(parent)
    os.mkdir(dir)
def SUBST_VARS_RECURSIVE(string, env):
    '''recursively expand ${VAR} references in string, giving up after
    100 passes to avoid infinite self-referencing loops'''
    if string is None:
        return string
    remaining = 100
    while '${' in string and remaining > 0:
        string = subst_vars_error(string, env)
        remaining -= 1
    return string
@conf
def EXPAND_VARIABLES(ctx, varstr, vars=None):
    '''expand variables from a user supplied dictionary

    This is most useful when you pass vars=locals() to expand
    all your local variables in strings
    '''
    if isinstance(varstr, list):
        # expand each element independently
        ret = []
        for s in varstr:
            ret.append(EXPAND_VARIABLES(ctx, s, vars=vars))
        return ret

    if not isinstance(varstr, str):
        return varstr

    import Environment
    env = Environment.Environment()
    ret = varstr
    # substitute on user supplied dict if avaiilable
    if vars is not None:
        for v in vars.keys():
            env[v] = vars[v]
        ret = SUBST_VARS_RECURSIVE(ret, env)

    # if anything left, subst on the environment as well
    if ret.find('${') != -1:
        ret = SUBST_VARS_RECURSIVE(ret, ctx.env)
    # make sure there is nothing left. Also check for the common
    # typo of $( instead of ${
    if ret.find('${') != -1 or ret.find('$(') != -1:
        Logs.error('Failed to substitute all variables in varstr=%s' % ret)
        sys.exit(1)
    return ret
Build.BuildContext.EXPAND_VARIABLES = EXPAND_VARIABLES
def RUN_COMMAND(cmd,
                env=None,
                shell=False):
    '''run a external command; returns the exit code, the negated signal
    number if the command was killed, or -1 on an unknown status'''
    if env:
        cmd = SUBST_VARS_RECURSIVE(cmd, env)

    status = os.system(cmd)
    if os.WIFEXITED(status):
        return os.WEXITSTATUS(status)
    if os.WIFSIGNALED(status):
        return - os.WTERMSIG(status)
    # bugfix: the '%' operator was missing, so this line raised
    # TypeError ("'str' object is not callable") instead of logging
    Logs.error("Unknown exit reason %d for command: %s" % (status, cmd))
    return -1
# make sure we have md5. some systems don't have it
try:
    from hashlib import md5
    # Even if hashlib.md5 exists, it may be unusable.
    # Try to use MD5 function. In FIPS mode this will cause an exception
    # and we'll get to the replacement code
    # NOTE(review): after 'from hashlib import md5', md5 is a function, so
    # 'md5.md5(...)' raises AttributeError and always selects the fallback
    # path — confirm whether 'md5('abcd')' was intended here
    foo = md5.md5('abcd')
except:
    try:
        import md5
        # repeat the same check here, mere success of import is not enough.
        # Try to use MD5 function. In FIPS mode this will cause an exception
        foo = md5.md5('abcd')
    except:
        import Constants
        Constants.SIG_NIL = hash('abcd')
        class replace_md5(object):
            '''minimal md5-like object built on Python's hash(), used when
            no real MD5 implementation is available (e.g. FIPS mode)'''
            def __init__(self):
                self.val = None
            def update(self, val):
                # chain the running value with the new chunk
                self.val = hash((self.val, val))
            def digest(self):
                return str(self.val)
            def hexdigest(self):
                return self.digest().encode('hex')
        def replace_h_file(filename):
            # hash a file's contents in 100KB chunks using replace_md5
            f = open(filename, 'rb')
            m = replace_md5()
            while (filename):
                filename = f.read(100000)
                m.update(filename)
            f.close()
            return m.digest()
        Utils.md5 = replace_md5
        Task.md5 = replace_md5
        Utils.h_file = replace_h_file
def LOAD_ENVIRONMENT():
    '''load the configuration environment, allowing access to env vars
    from new commands'''
    import Environment
    env = Environment.Environment()
    try:
        env.load('.lock-wscript')
        env.load(env.blddir + '/c4che/default.cache.py')
    except:
        # best-effort: return an empty environment if not yet configured
        pass
    return env
def IS_NEWER(bld, file1, file2):
    '''return True if file1 is newer than file2 (by mtime, relative to
    the current build directory)'''
    def mtime(name):
        return os.stat(os.path.join(bld.curdir, name)).st_mtime
    return mtime(file1) > mtime(file2)
Build.BuildContext.IS_NEWER = IS_NEWER
@conf
def RECURSE(ctx, directory):
    '''recurse into a directory, relative to the curdir or top level'''
    try:
        visited_dirs = ctx.visited_dirs
    except:
        visited_dirs = ctx.visited_dirs = set()
    d = os.path.join(ctx.curdir, directory)
    if os.path.exists(d):
        abspath = os.path.abspath(d)
    else:
        # fall back to a path relative to the source tree top
        abspath = os.path.abspath(os.path.join(Utils.g_module.srcdir, directory))
    ctxclass = ctx.__class__.__name__
    # dedupe per (context type, directory) so each phase visits a dir once
    key = ctxclass + ':' + abspath
    if key in visited_dirs:
        # already done it
        return
    visited_dirs.add(key)
    relpath = os_path_relpath(abspath, ctx.curdir)
    # dispatch on the concrete waf context type
    if ctxclass == 'Handler':
        return ctx.sub_options(relpath)
    if ctxclass == 'ConfigurationContext':
        return ctx.sub_config(relpath)
    if ctxclass == 'BuildContext':
        return ctx.add_subdirs(relpath)
    Logs.error('Unknown RECURSE context class', ctxclass)
    # NOTE(review): bare 'raise' with no active exception does not re-raise
    # anything meaningful here — confirm a real exception type was intended
    raise
Options.Handler.RECURSE = RECURSE
Build.BuildContext.RECURSE = RECURSE
def CHECK_MAKEFLAGS(bld):
    '''check for MAKEFLAGS environment variable in case we are being
    called from a Makefile try to honor a few make command line flags'''
    if not 'WAF_MAKE' in os.environ:
        return
    makeflags = os.environ.get('MAKEFLAGS')
    if makeflags is None:
        return
    jobs_set = False
    # we need to use shlex.split to cope with the escaping of spaces
    # in makeflags
    for opt in shlex.split(makeflags):
        # options can come either as -x or as x
        if opt[0:2] == 'V=':
            # V=<n> controls waf verbosity, like 'waf -v'
            Options.options.verbose = Logs.verbose = int(opt[2:])
            if Logs.verbose > 0:
                Logs.zones = ['runner']
            if Logs.verbose > 2:
                Logs.zones = ['*']
        elif opt[0].isupper() and opt.find('=') != -1:
            # this allows us to set waf options on the make command line
            # for example, if you do "make FOO=blah", then we set the
            # option 'FOO' in Options.options, to blah. If you look in wafsamba/wscript
            # you will see that the command line accessible options have their dest=
            # set to uppercase, to allow for passing of options from make in this way
            # this is also how "make test TESTS=testpattern" works, and
            # "make VERBOSE=1" as well as things like "make SYMBOLCHECK=1"
            loc = opt.find('=')
            setattr(Options.options, opt[0:loc], opt[loc+1:])
        elif opt[0] != '-':
            # bundled single-letter flags without a leading dash (make style)
            for v in opt:
                if v == 'j':
                    jobs_set = True
                elif v == 'k':
                    Options.options.keep = True
        elif opt == '-j':
            jobs_set = True
        elif opt == '-k':
            Options.options.keep = True
    if not jobs_set:
        # default to one job
        Options.options.jobs = 1
Build.BuildContext.CHECK_MAKEFLAGS = CHECK_MAKEFLAGS
option_groups = {}

def option_group(opt, name):
    '''find or create an option group, memoised by name'''
    global option_groups
    try:
        return option_groups[name]
    except KeyError:
        group = opt.add_option_group(name)
        option_groups[name] = group
        return group
Options.Handler.option_group = option_group
def save_file(filename, contents, create_dir=False):
    '''save data to a file; returns True on success, False on any failure

    create_dir: also create the parent directory if it is missing.
    '''
    if create_dir:
        mkdir_p(os.path.dirname(filename))
    try:
        f = open(filename, 'w')
        try:
            f.write(contents)
        finally:
            # bugfix: always close the handle, even when write() fails
            f.close()
    except:
        return False
    return True
def load_file(filename):
    '''return the contents of a file, or None if it cannot be read'''
    try:
        f = open(filename, 'r')
        try:
            r = f.read()
        finally:
            # bugfix: always close the handle, even when read() fails
            f.close()
    except:
        return None
    return r
def reconfigure(ctx):
    '''rerun configure if necessary'''
    import Configure, samba_wildcard, Scripting
    if not os.path.exists(".lock-wscript"):
        raise Utils.WafError('configure has not been run')
    # build a fake environment so waf's autoconfig logic can run
    bld = samba_wildcard.fake_build_environment()
    Configure.autoconfig = True
    Scripting.check_configured(bld)
def map_shlib_extension(ctx, name, python=False):
    '''map a filename with a shared library extension of .so to the real shlib name'''
    if name is None:
        return None
    if name[-1:].isdigit():
        # some libraries have specified versions in the wscript rule
        return name
    (root1, ext1) = os.path.splitext(name)
    if python:
        # python extension modules use their own platform pattern
        (root2, ext2) = os.path.splitext(ctx.env.pyext_PATTERN)
    else:
        (root2, ext2) = os.path.splitext(ctx.env.shlib_PATTERN)
    return root1+ext2
Build.BuildContext.map_shlib_extension = map_shlib_extension
def apply_pattern(filename, pattern):
    '''apply a filename pattern (e.g. "lib%s.so") to a filename that may
    carry a directory component; the pattern applies to the basename only'''
    head, tail = os.path.split(filename)
    if not head:
        return pattern % tail
    return os.path.join(head, pattern % tail)
def make_libname(ctx, name, nolibprefix=False, version=None, python=False):
    """make a library filename
         Options:
              nolibprefix: don't include the lib prefix
              version    : add a version number
              python     : if we should use python module name conventions"""

    if python:
        libname = apply_pattern(name, ctx.env.pyext_PATTERN)
    else:
        libname = apply_pattern(name, ctx.env.shlib_PATTERN)
    if nolibprefix and libname[0:3] == 'lib':
        libname = libname[3:]
    if version:
        if version[0] == '.':
            version = version[1:]
        (root, ext) = os.path.splitext(libname)
        if ext == ".dylib":
            # special case - version goes before the prefix
            libname = "%s.%s%s" % (root, version, ext)
        else:
            libname = "%s%s.%s" % (root, ext, version)
    return libname
Build.BuildContext.make_libname = make_libname
def get_tgt_list(bld):
    '''return a list of build objects for samba'''

    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')

    # build a list of task generators we are interested in
    tgt_list = []
    for tgt in targets:
        type = targets[tgt]
        # only these target types correspond to real task generators
        if not type in ['SUBSYSTEM', 'MODULE', 'BINARY', 'LIBRARY', 'ASN1', 'PYTHON']:
            continue
        t = bld.name_to_obj(tgt, bld.env)
        if t is None:
            Logs.error("Target %s of type %s has no task generator" % (tgt, type))
            sys.exit(1)
        tgt_list.append(t)
    return tgt_list
from Constants import WSCRIPT_FILE
def PROCESS_SEPARATE_RULE(self, rule):
    ''' cause waf to process additional script based on `rule'.
        You should have file named wscript__rule in the current directory
        where stage is either 'configure' or 'build'
    '''
    ctxclass = self.__class__.__name__
    stage = ''
    if ctxclass == 'ConfigurationContext':
        stage = 'configure'
    elif ctxclass == 'BuildContext':
        stage = 'build'
    # e.g. wscript_configure_<rule> or wscript_build_<rule>
    file_path = os.path.join(self.curdir, WSCRIPT_FILE+'_'+stage+'_'+rule)
    txt = load_file(file_path)
    if txt:
        dc = {'ctx': self}
        if getattr(self.__class__, 'pre_recurse', None):
            dc = self.pre_recurse(txt, file_path, self.curdir)
        # execute the extra wscript in the context's namespace
        exec(compile(txt, file_path, 'exec'), dc)
        if getattr(self.__class__, 'post_recurse', None):
            dc = self.post_recurse(txt, file_path, self.curdir)

Build.BuildContext.PROCESS_SEPARATE_RULE = PROCESS_SEPARATE_RULE
ConfigurationContext.PROCESS_SEPARATE_RULE = PROCESS_SEPARATE_RULE
def AD_DC_BUILD_IS_ENABLED(self):
    '''return True when the AD DC build is enabled in the configuration'''
    return bool(self.CONFIG_SET('AD_DC_BUILD_IS_ENABLED'))
Build.BuildContext.AD_DC_BUILD_IS_ENABLED = AD_DC_BUILD_IS_ENABLED
ntdb-1.0/buildtools/wafsamba/samba_version.py 0000664 0000000 0000000 00000025243 12241515307 0021452 0 ustar 00root root 0000000 0000000 import os
import Utils
import samba_utils
import sys
def bzr_version_summary(path):
    '''return (suffix, fields) describing the bzr state of the tree at path;
    suffix is "BZR-<revno>" or "GIT-<sha>" when the bzr-git plugin is present'''
    try:
        import bzrlib
    except ImportError:
        return ("BZR-UNKNOWN", {})

    import bzrlib.ui
    bzrlib.ui.ui_factory = bzrlib.ui.make_ui_for_terminal(
        sys.stdin, sys.stdout, sys.stderr)
    from bzrlib import branch, osutils, workingtree
    from bzrlib.plugin import load_plugins
    load_plugins()

    b = branch.Branch.open(path)
    (revno, revid) = b.last_revision_info()
    rev = b.repository.get_revision(revid)

    fields = {
        "BZR_REVISION_ID": revid,
        "BZR_REVNO": revno,
        "COMMIT_DATE": osutils.format_date_with_offset_in_original_timezone(rev.timestamp,
            rev.timezone or 0),
        "COMMIT_TIME": int(rev.timestamp),
        "BZR_BRANCH": rev.properties.get("branch-nick", ""),
        }

    # If possible, retrieve the git sha
    try:
        from bzrlib.plugins.git.object_store import get_object_store
    except ImportError:
        # No git plugin
        ret = "BZR-%d" % revno
    else:
        store = get_object_store(b.repository)
        store.lock_read()
        try:
            full_rev = store._lookup_revision_sha1(revid)
        finally:
            store.unlock()
        fields["GIT_COMMIT_ABBREV"] = full_rev[:7]
        fields["GIT_COMMIT_FULLREV"] = full_rev
        ret = "GIT-" + fields["GIT_COMMIT_ABBREV"]

    # a trailing '+' marks uncommitted local changes
    if workingtree.WorkingTree.open(path).has_changes():
        fields["COMMIT_IS_CLEAN"] = 0
        ret += "+"
    else:
        fields["COMMIT_IS_CLEAN"] = 1
    return (ret, fields)
def git_version_summary(path, env=None):
    '''return (suffix, fields) describing the git HEAD of the tree at path;
    suffix is "GIT-<abbrev-sha>", with a trailing '+' for local changes'''
    # Get version from GIT
    if not 'GIT' in env and os.path.exists("/usr/bin/git"):
        # this is useful when doing make dist without configuring
        env.GIT = "/usr/bin/git"

    if not 'GIT' in env:
        return ("GIT-UNKNOWN", {})

    environ = dict(os.environ)
    environ["GIT_DIR"] = '%s/.git' % path
    environ["GIT_WORK_TREE"] = path
    git = Utils.cmd_output(env.GIT + ' show --pretty=format:"%h%n%ct%n%H%n%cd" --stat HEAD', silent=True, env=environ)

    lines = git.splitlines()
    if not lines or len(lines) < 4:
        return ("GIT-UNKNOWN", {})

    fields = {
            "GIT_COMMIT_ABBREV": lines[0],
            "GIT_COMMIT_FULLREV": lines[2],
            "COMMIT_TIME": int(lines[1]),
            "COMMIT_DATE": lines[3],
            }

    ret = "GIT-" + fields["GIT_COMMIT_ABBREV"]

    # optionally detect uncommitted changes (costs an extra git invocation)
    if env.GIT_LOCAL_CHANGES:
        clean = Utils.cmd_output('%s diff HEAD | wc -l' % env.GIT, silent=True).strip()
        if clean == "0":
            fields["COMMIT_IS_CLEAN"] = 1
        else:
            fields["COMMIT_IS_CLEAN"] = 0
            ret += "+"

    return (ret, fields)
def distversion_version_summary(path):
    '''read version information from the .distversion file shipped in a
    release tarball; returns (suffix, fields)'''
    fields = {}
    suffix = None

    fh = open(path + '/.distversion', 'r')
    for raw in fh:
        stripped = raw.strip()
        # skip blank lines and comments
        if stripped == '' or stripped.startswith("#"):
            continue
        try:
            parts = stripped.split("=")
            if parts[1] != "":
                if parts[0] == "SUFFIX":
                    suffix = parts[1]
                else:
                    fields[parts[0]] = parts[1]
        except:
            print("Failed to parse line %s from .distversion file." % (stripped))
            raise
    fh.close()

    if "COMMIT_TIME" in fields:
        fields["COMMIT_TIME"] = int(fields["COMMIT_TIME"])

    if suffix is None:
        return ("UNKNOWN", fields)
    return (suffix, fields)
class SambaVersion(object):
    def __init__(self, version_dict, path, env=None, is_install=True):
        '''Determine the version number of samba
           See VERSION for the format.  Entries on that file are
           also accepted as dictionary entries here
        '''

        self.MAJOR=None
        self.MINOR=None
        self.RELEASE=None
        self.REVISION=None
        self.TP_RELEASE=None
        self.ALPHA_RELEASE=None
        self.BETA_RELEASE=None
        self.PRE_RELEASE=None
        self.RC_RELEASE=None
        self.IS_SNAPSHOT=True
        self.RELEASE_NICKNAME=None
        self.VENDOR_SUFFIX=None
        self.VENDOR_PATCH=None

        # copy the parsed VERSION entries onto this object, stripping the
        # SAMBA_VERSION_ prefix where present
        # NOTE(review): iteritems() is Python-2-only — confirm the intended
        # interpreter before porting
        for a, b in version_dict.iteritems():
            if a.startswith("SAMBA_VERSION_"):
                setattr(self, a[14:], b)
            else:
                setattr(self, a, b)

        # NOTE(review): IS_GIT_SNAPSHOT has no default above, so it must be
        # present in version_dict or this raises AttributeError — confirm
        if self.IS_GIT_SNAPSHOT == "yes":
            self.IS_SNAPSHOT=True
        elif self.IS_GIT_SNAPSHOT == "no":
            self.IS_SNAPSHOT=False
        else:
            raise Exception("Unknown value for IS_GIT_SNAPSHOT: %s" % self.IS_GIT_SNAPSHOT)

        ##
        ## start with "3.0.22"
        ##
        self.MAJOR=int(self.MAJOR)
        self.MINOR=int(self.MINOR)
        self.RELEASE=int(self.RELEASE)

        SAMBA_VERSION_STRING = ("%u.%u.%u" % (self.MAJOR, self.MINOR, self.RELEASE))

        ##
        ## maybe add "3.0.22a" or "4.0.0tp11" or "4.0.0alpha1" or "4.0.0beta1" or "3.0.22pre1" or "3.0.22rc1"
        ## We do not do pre or rc version on patch/letter releases
        ##
        if self.REVISION is not None:
            SAMBA_VERSION_STRING += self.REVISION
        if self.TP_RELEASE is not None:
            self.TP_RELEASE = int(self.TP_RELEASE)
            SAMBA_VERSION_STRING += "tp%u" % self.TP_RELEASE
        if self.ALPHA_RELEASE is not None:
            self.ALPHA_RELEASE = int(self.ALPHA_RELEASE)
            SAMBA_VERSION_STRING += ("alpha%u" % self.ALPHA_RELEASE)
        if self.BETA_RELEASE is not None:
            self.BETA_RELEASE = int(self.BETA_RELEASE)
            SAMBA_VERSION_STRING += ("beta%u" % self.BETA_RELEASE)
        if self.PRE_RELEASE is not None:
            self.PRE_RELEASE = int(self.PRE_RELEASE)
            SAMBA_VERSION_STRING += ("pre%u" % self.PRE_RELEASE)
        if self.RC_RELEASE is not None:
            self.RC_RELEASE = int(self.RC_RELEASE)
            SAMBA_VERSION_STRING += ("rc%u" % self.RC_RELEASE)

        if self.IS_SNAPSHOT:
            # snapshot builds append a VCS-derived suffix, probed in order:
            # developer build, git, bzr, dist tarball, unknown
            if not is_install:
                suffix = "DEVELOPERBUILD"
                self.vcs_fields = {}
            elif os.path.exists(os.path.join(path, ".git")):
                suffix, self.vcs_fields = git_version_summary(path, env=env)
            elif os.path.exists(os.path.join(path, ".bzr")):
                suffix, self.vcs_fields = bzr_version_summary(path)
            elif os.path.exists(os.path.join(path, ".distversion")):
                suffix, self.vcs_fields = distversion_version_summary(path)
            else:
                suffix = "UNKNOWN"
                self.vcs_fields = {}
            self.vcs_fields["SUFFIX"] = suffix
            SAMBA_VERSION_STRING += "-" + suffix
        else:
            self.vcs_fields = {}

        self.OFFICIAL_STRING = SAMBA_VERSION_STRING

        if self.VENDOR_SUFFIX is not None:
            SAMBA_VERSION_STRING += ("-" + self.VENDOR_SUFFIX)
            # NOTE(review): the following self-assignment is a no-op
            self.VENDOR_SUFFIX = self.VENDOR_SUFFIX

        if self.VENDOR_PATCH is not None:
            SAMBA_VERSION_STRING += ("-" + self.VENDOR_PATCH)
            # NOTE(review): the following self-assignment is a no-op
            self.VENDOR_PATCH = self.VENDOR_PATCH

        self.STRING = SAMBA_VERSION_STRING

        if self.RELEASE_NICKNAME is not None:
            self.STRING_WITH_NICKNAME = "%s (%s)" % (self.STRING, self.RELEASE_NICKNAME)
        else:
            self.STRING_WITH_NICKNAME = self.STRING
    def __str__(self):
        """Render this version object as the contents of an autogenerated C header.

        Emits one SAMBA_VERSION_* #define per component that is set, the
        collected VCS fields, and the SAMBA_VERSION_STRING macro (which
        vendors may override via SAMBA_VERSION_VENDOR_FUNCTION).
        """
        string="/* Autogenerated by waf */\n"
        string+="#define SAMBA_VERSION_MAJOR %u\n" % self.MAJOR
        string+="#define SAMBA_VERSION_MINOR %u\n" % self.MINOR
        string+="#define SAMBA_VERSION_RELEASE %u\n" % self.RELEASE
        # optional components are only emitted when present in the VERSION file
        if self.REVISION is not None:
            string+="#define SAMBA_VERSION_REVISION %u\n" % self.REVISION
        if self.TP_RELEASE is not None:
            string+="#define SAMBA_VERSION_TP_RELEASE %u\n" % self.TP_RELEASE
        if self.ALPHA_RELEASE is not None:
            string+="#define SAMBA_VERSION_ALPHA_RELEASE %u\n" % self.ALPHA_RELEASE
        if self.BETA_RELEASE is not None:
            string+="#define SAMBA_VERSION_BETA_RELEASE %u\n" % self.BETA_RELEASE
        if self.PRE_RELEASE is not None:
            string+="#define SAMBA_VERSION_PRE_RELEASE %u\n" % self.PRE_RELEASE
        if self.RC_RELEASE is not None:
            string+="#define SAMBA_VERSION_RC_RELEASE %u\n" % self.RC_RELEASE
        # VCS fields may be strings or ints; anything else is a programming error
        for name in sorted(self.vcs_fields.keys()):
            string+="#define SAMBA_VERSION_%s " % name
            value = self.vcs_fields[name]
            if isinstance(value, basestring):  # NOTE(review): Python 2 only ('basestring')
                string += "\"%s\"" % value
            elif type(value) is int:
                string += "%d" % value
            else:
                raise Exception("Unknown type for %s: %r" % (name, value))
            string += "\n"
        string+="#define SAMBA_VERSION_OFFICIAL_STRING \"" + self.OFFICIAL_STRING + "\"\n"
        if self.VENDOR_SUFFIX is not None:
            string+="#define SAMBA_VERSION_VENDOR_SUFFIX " + self.VENDOR_SUFFIX + "\n"
        if self.VENDOR_PATCH is not None:
            string+="#define SAMBA_VERSION_VENDOR_PATCH " + self.VENDOR_PATCH + "\n"
        if self.RELEASE_NICKNAME is not None:
            string+="#define SAMBA_VERSION_RELEASE_NICKNAME " + self.RELEASE_NICKNAME + "\n"
        # We need to put this #ifdef in to the headers so that vendors can override the version with a function
        string+='''
#ifdef SAMBA_VERSION_VENDOR_FUNCTION
# define SAMBA_VERSION_STRING SAMBA_VERSION_VENDOR_FUNCTION
#else /* SAMBA_VERSION_VENDOR_FUNCTION */
# define SAMBA_VERSION_STRING "''' + self.STRING_WITH_NICKNAME + '''"
#endif
'''
        string+="/* Version for mkrelease.sh: \nSAMBA_VERSION_STRING=" + self.STRING_WITH_NICKNAME + "\n */\n"
        return string
def samba_version_file(version_file, path, env=None, is_install=True):
    '''Parse the version information from a VERSION file.

    version_file -- path to the VERSION file (KEY=value lines, '#' comments)
    path         -- source tree path, passed through to SambaVersion
    Returns a SambaVersion object built from the parsed key/value pairs.
    Raises on a malformed (non-comment, non-empty) line with no '=' in it.

    Fixes over the previous version: the file is now closed via a context
    manager even when parsing fails, and values are split on the *first*
    '=' only, so values containing '=' are no longer truncated.
    '''
    version_dict = {}
    with open(version_file, 'r') as f:
        for line in f:
            line = line.strip()
            if line == '':
                continue
            if line.startswith("#"):
                continue
            try:
                key, sep, value = line.partition("=")
                if not sep:
                    # no '=' at all: same failure mode as the old IndexError
                    raise ValueError("no '=' separator in line")
                if value != "":
                    version_dict[key] = value.strip('"')
            except Exception:
                print("Failed to parse line %s from %s" % (line, version_file))
                raise
    return SambaVersion(version_dict, path, env=env, is_install=is_install)
def load_version(env=None, is_install=True):
    '''load samba versions either from ./VERSION or git
    return a version object for detailed breakdown'''
    if not env:
        # fall back to the cached waf environment when none was supplied
        env = samba_utils.LOAD_ENVIRONMENT()
    version = samba_version_file("./VERSION", ".", env, is_install=is_install)
    # publish the string form where waf's dist machinery expects it
    Utils.g_module.VERSION = version.STRING
    return version
ntdb-1.0/buildtools/wafsamba/samba_wildcard.py 0000664 0000000 0000000 00000010666 12241515307 0021561 0 ustar 00root root 0000000 0000000 # based on playground/evil in the waf svn tree
import os, datetime
import Scripting, Utils, Options, Logs, Environment, fnmatch
from Constants import *
from samba_utils import *
def run_task(t, k):
    '''Execute one build task, raising WafError (naming k) on failure.'''
    status = t.run()
    if status:
        raise Utils.WafError("Failed to build %s: %u" % (k, status))
def run_named_build_task(cmd):
    '''run a named build task, matching the cmd name using fnmatch
    wildcards against inputs and outputs of all build tasks'''
    bld = fake_build_environment(info=False)
    found = False
    cwd_node = bld.root.find_dir(os.getcwd())
    top_node = bld.root.find_dir(bld.srcnode.abspath())
    cmd = os.path.normpath(cmd)
    # cope with builds of bin/*/*
    if os.path.islink(cmd):
        cmd = os_path_relpath(os.readlink(cmd), os.getcwd())
    if cmd[0:12] == "bin/default/":
        # strip the build-directory prefix so matching works on source paths
        cmd = cmd[12:]
    # scan every task's outputs and inputs for a path matching cmd,
    # relative both to the current directory and to the tree root
    for g in bld.task_manager.groups:
        for attr in ['outputs', 'inputs']:
            for t in g.tasks:
                s = getattr(t, attr, [])
                for k in s:
                    relpath1 = k.relpath_gen(cwd_node)
                    relpath2 = k.relpath_gen(top_node)
                    if (fnmatch.fnmatch(relpath1, cmd) or
                        fnmatch.fnmatch(relpath2, cmd)):
                        t.position = [0,0]
                        print(t.display())
                        run_task(t, k)
                        found = True
    if not found:
        raise Utils.WafError("Unable to find build target matching %s" % cmd)
def rewrite_compile_targets():
    '''cope with the bin/ form of compile target

    Rewrites Options.options.compile_targets so that symlinked bin/ paths
    are replaced by the real target names found in the TARGET_TYPE cache.
    '''
    if not Options.options.compile_targets:
        return
    bld = fake_build_environment(info=False)
    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
    tlist = []
    for t in Options.options.compile_targets.split(','):
        if not os.path.islink(t):
            tlist.append(t)
            continue
        link = os.readlink(t)
        # renamed from 'list' to avoid shadowing the builtin
        parts = link.split('/')
        # try the basename first, then the last two path components
        for name in [parts[-1], '/'.join(parts[-2:])]:
            if name in targets:
                tlist.append(name)
                continue
    Options.options.compile_targets = ",".join(tlist)
def wildcard_main(missing_cmd_fn):
    '''this replaces main from Scripting, allowing us to override the
    behaviour for unknown commands

    If a unknown command is found, then missing_cmd_fn() is called with
    the name of the requested command
    '''
    Scripting.commands = Options.arg_line[:]
    # rewrite the compile targets to cope with the bin/xx form
    rewrite_compile_targets()
    while Scripting.commands:
        x = Scripting.commands.pop(0)
        ini = datetime.datetime.now()
        if x == 'configure':
            fun = Scripting.configure
        elif x == 'build':
            fun = Scripting.build
        else:
            fun = getattr(Utils.g_module, x, None)
        # this is the new addition on top of main from Scripting.py
        if not fun:
            missing_cmd_fn(x)
            break
        ctx = getattr(Utils.g_module, x + '_context', Utils.Context)()
        if x in ['init', 'shutdown', 'dist', 'distclean', 'distcheck']:
            # these commands may be defined without a context parameter;
            # fall back to a no-argument call if passing ctx fails
            try:
                fun(ctx)
            except TypeError:
                fun()
        else:
            fun(ctx)
        ela = ''
        if not Options.options.progress_bar:
            ela = ' (%s)' % Utils.get_elapsed_time(ini)
        if x != 'init' and x != 'shutdown':
            Logs.info('%r finished successfully%s' % (x, ela))
        # always finish a command sequence with an implicit shutdown
        if not Scripting.commands and x != 'shutdown':
            Scripting.commands.append('shutdown')
def fake_build_environment(info=True, flush=False):
    """create all the tasks for the project, but do not run the build
    return the build context in use"""
    bld = getattr(Utils.g_module, 'build_context', Utils.Context)()
    bld = Scripting.check_configured(bld)
    # make sure no install/uninstall actions are triggered by this fake build
    Options.commands['install'] = False
    Options.commands['uninstall'] = False
    Options.is_install = False
    bld.is_install = 0 # False
    try:
        proj = Environment.Environment(Options.lockfile)
    except IOError:
        raise Utils.WafError("Project not configured (run 'waf configure' first)")
    bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
    bld.load_envs()
    if info:
        Logs.info("Waf: Entering directory `%s'" % bld.bldnode.abspath())
    bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]])
    # create the task graph; flush() would also post the tasks
    bld.pre_build()
    if flush:
        bld.flush()
    return bld
ntdb-1.0/buildtools/wafsamba/stale_files.py 0000664 0000000 0000000 00000007640 12241515307 0021115 0 ustar 00root root 0000000 0000000 # encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
"""
Add a pre-build hook to remove all build files
which do not have a corresponding target
This can be used for example to remove the targets
that have changed name without performing
a full 'waf clean'
Of course, it will only work if there are no dynamically generated
nodes/tasks, in which case the method will have to be modified
to exclude some folders for example.
"""
import Logs, Build, os, samba_utils, Options, Utils
from Runner import Parallel
old_refill_task_list = Parallel.refill_task_list
def replace_refill_task_list(self):
    '''replacement for refill_task_list() that deletes stale files'''
    iit = old_refill_task_list(self)
    bld = self.bld
    if not getattr(bld, 'new_rules', False):
        # we only need to check for stale files if the build rules changed
        return iit
    if Options.options.compile_targets:
        # not safe when --target is used
        return iit
    # execute only once
    if getattr(self, 'cleanup_done', False):
        return iit
    self.cleanup_done = True
    def group_name(g):
        # NOTE(review): this helper appears unused in this function — confirm
        tm = self.bld.task_manager
        return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0]
    bin_base = bld.bldnode.abspath()
    bin_base_len = len(bin_base)
    # paranoia
    if bin_base[-4:] != '/bin':
        raise Utils.WafError("Invalid bin base: %s" % bin_base)
    # obtain the expected list of files
    expected = []
    for i in range(len(bld.task_manager.groups)):
        g = bld.task_manager.groups[i]
        tasks = g.tasks_gen
        for x in tasks:
            try:
                if getattr(x, 'target'):
                    tlist = samba_utils.TO_LIST(getattr(x, 'target'))
                    ttype = getattr(x, 'samba_type', None)
                    task_list = getattr(x, 'compiled_tasks', [])
                    if task_list:
                        # this gets all of the .o files, including the task
                        # ids, so foo.c maps to foo_3.o for idx=3
                        for tsk in task_list:
                            for output in tsk.outputs:
                                objpath = os.path.normpath(output.abspath(bld.env))
                                expected.append(objpath)
                    for t in tlist:
                        if ttype in ['LIBRARY','MODULE']:
                            t = samba_utils.apply_pattern(t, bld.env.shlib_PATTERN)
                        if ttype == 'PYTHON':
                            t = samba_utils.apply_pattern(t, bld.env.pyext_PATTERN)
                        p = os.path.join(x.path.abspath(bld.env), t)
                        p = os.path.normpath(p)
                        expected.append(p)
                    for n in x.allnodes:
                        p = n.abspath(bld.env)
                        if p[0:bin_base_len] == bin_base:
                            expected.append(p)
            except:
                # NOTE(review): bare except — skips task generators with no
                # 'target' attribute (getattr raises), but also hides any
                # other error; confirm this is intentional before tightening
                pass
    # walk the build tree and remove generated files that are not expected
    for root, dirs, files in os.walk(bin_base):
        for f in files:
            p = root + '/' + f
            if os.path.islink(p):
                link = os.readlink(p)
                if link[0:bin_base_len] == bin_base:
                    p = link
            if f in ['config.h']:
                continue
            (froot, fext) = os.path.splitext(f)
            # only generated C/object/library artifacts are candidates
            if fext not in [ '.c', '.h', '.so', '.o' ]:
                continue
            if f[-7:] == '.inst.h':
                continue
            if p.find("/.conf") != -1:
                continue
            if not p in expected and os.path.exists(p):
                Logs.warn("Removing stale file: %s" % p)
                os.unlink(p)
    return iit
def AUTOCLEANUP_STALE_FILES(bld):
    """automatically clean up any files in bin that shouldn't be there

    Installs replace_refill_task_list() as the task-list refill hook so
    stale build products are removed before the next refill.
    """
    # The previous version re-captured Parallel.refill_task_list into an
    # unused local named 'old_refill_task_list', shadowing the module-level
    # capture taken at import time; that dead assignment has been removed.
    Parallel.refill_task_list = replace_refill_task_list
    Parallel.bld = bld
Build.BuildContext.AUTOCLEANUP_STALE_FILES = AUTOCLEANUP_STALE_FILES
ntdb-1.0/buildtools/wafsamba/symbols.py 0000664 0000000 0000000 00000053064 12241515307 0020314 0 ustar 00root root 0000000 0000000 # a waf tool to extract symbols from object files or libraries
# using nm, producing a set of exposed defined/undefined symbols
import Utils, Build, subprocess, Logs, re
from samba_wildcard import fake_build_environment
from samba_utils import *
# these are the data structures used in symbols.py:
#
# bld.env.symbol_map : dictionary mapping public symbol names to list of
# subsystem names where that symbol exists
#
# t.in_library : list of libraries that t is in
#
# bld.env.public_symbols: set of public symbols for each subsystem
# bld.env.used_symbols : set of used symbols for each subsystem
#
# bld.env.syslib_symbols: dictionary mapping system library name to set of symbols
# for that library
# bld.env.library_dict : dictionary mapping built library paths to subsystem names
#
# LOCAL_CACHE(bld, 'TARGET_TYPE') : dictionary mapping subsystem name to target type
def symbols_extract(bld, objfiles, dynamic=False):
    '''extract symbols from objfile, returning a dictionary containing
    the set of undefined and public symbols for each file'''
    ret = {}
    # see if we can get some results from the nm cache
    if not bld.env.nm_cache:
        bld.env.nm_cache = {}
    objfiles = set(objfiles).copy()
    remaining = set()
    for obj in objfiles:
        if obj in bld.env.nm_cache:
            ret[obj] = bld.env.nm_cache[obj].copy()
        else:
            remaining.add(obj)
    objfiles = remaining
    if len(objfiles) == 0:
        return ret
    cmd = ["nm"]
    if dynamic:
        # needed for some .so files
        cmd.append("-D")
    cmd.extend(list(objfiles))
    nmpipe = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
    # nm only prints 'filename:' headers when given multiple inputs;
    # for a single file, seed the result entry ourselves
    if len(objfiles) == 1:
        filename = list(objfiles)[0]
        ret[filename] = { "PUBLIC": set(), "UNDEFINED" : set()}
    for line in nmpipe:
        line = line.strip()
        if line.endswith(':'):
            # start of the symbol list for the next file
            filename = line[:-1]
            ret[filename] = { "PUBLIC": set(), "UNDEFINED" : set() }
            continue
        cols = line.split(" ")
        if cols == ['']:
            continue
        # see if the line starts with an address
        if len(cols) == 3:
            symbol_type = cols[1]
            symbol = cols[2]
        else:
            symbol_type = cols[0]
            symbol = cols[1]
        # nm type letters: B/D/G/T/R/V/W/S/i mark definitions, U undefined
        if symbol_type in "BDGTRVWSi":
            # its a public symbol
            ret[filename]["PUBLIC"].add(symbol)
        elif symbol_type in "U":
            ret[filename]["UNDEFINED"].add(symbol)
    # add to the cache (files with no symbols get empty sets cached)
    for obj in objfiles:
        if obj in ret:
            bld.env.nm_cache[obj] = ret[obj].copy()
        else:
            bld.env.nm_cache[obj] = { "PUBLIC": set(), "UNDEFINED" : set() }
    return ret
def real_name(name):
    '''strip a trailing ".objlist" from an object-list target name

    The previous implementation used find() plus an unconditional
    name[:-8], which corrupted any name merely *containing* ".objlist"
    in the middle; endswith() strips only a true suffix.
    '''
    if name.endswith(".objlist"):
        name = name[:-len(".objlist")]
    return name
def find_ldd_path(bld, libname, binary):
    '''find the path to the syslib we will link against'''
    ret = None
    if not bld.env.syslib_paths:
        bld.env.syslib_paths = {}
    if libname in bld.env.syslib_paths:
        return bld.env.syslib_paths[libname]
    lddpipe = subprocess.Popen(['ldd', binary], stdout=subprocess.PIPE).stdout
    for line in lddpipe:
        line = line.strip()
        cols = line.split(" ")
        # expect lines of the form 'libfoo.so.1 => /path/libfoo.so.1 (0x...)'
        if len(cols) < 3 or cols[1] != "=>":
            continue
        if cols[0].startswith("libc."):
            # save this one too
            bld.env.libc_path = cols[2]
        if cols[0].startswith(libname):
            ret = cols[2]
    # cache the result, even a failed (None) lookup, to avoid re-running ldd
    bld.env.syslib_paths[libname] = ret
    return ret
# some regular expressions for parsing readelf output
re_sharedlib = re.compile('Shared library: \[(.*)\]')
re_rpath = re.compile('Library rpath: \[(.*)\]')

def get_libs(bld, binname):
    '''find the list of linked libraries for any binary or library
    binname is the path to the binary/library on disk

    We do this using readelf instead of ldd as we need to avoid recursing
    into system libraries
    '''
    # see if we can get the result from the ldd cache
    if not bld.env.lib_cache:
        bld.env.lib_cache = {}
    if binname in bld.env.lib_cache:
        return bld.env.lib_cache[binname].copy()
    rpath = []
    libs = set()
    # collect DT_NEEDED entries and DT_RPATH components from the dynamic section
    elfpipe = subprocess.Popen(['readelf', '--dynamic', binname], stdout=subprocess.PIPE).stdout
    for line in elfpipe:
        m = re_sharedlib.search(line)
        if m:
            libs.add(m.group(1))
        m = re_rpath.search(line)
        if m:
            rpath.extend(m.group(1).split(":"))
    # resolve each needed library to a real path, trying rpath entries first
    ret = set()
    for lib in libs:
        found = False
        for r in rpath:
            path = os.path.join(r, lib)
            if os.path.exists(path):
                ret.add(os.path.realpath(path))
                found = True
                break
        if not found:
            # we didn't find this lib using rpath. It is probably a system
            # library, so to find the path to it we either need to use ldd
            # or we need to start parsing /etc/ld.so.conf* ourselves. We'll
            # use ldd for now, even though it is slow
            path = find_ldd_path(bld, lib, binname)
            if path:
                ret.add(os.path.realpath(path))
    bld.env.lib_cache[binname] = ret.copy()
    return ret
def get_libs_recursive(bld, binname, seen):
    '''find the recursive list of linked libraries for any binary or library
    binname is the path to the binary/library on disk. seen is a set used
    to prevent loops
    '''
    if binname in seen:
        # already visited: break dependency cycles
        return set()
    ret = get_libs(bld, binname)
    seen.add(binname)
    for lib in ret:
        # we don't want to recurse into system libraries. If a system
        # library that we use (eg. libcups) happens to use another library
        # (such as libkrb5) which contains common symbols with our own
        # libraries, then that is not an error
        if lib in bld.env.library_dict:
            ret = ret.union(get_libs_recursive(bld, lib, seen))
    return ret
def find_syslib_path(bld, libname, deps):
    '''find the path to the syslib we will link against'''
    # the strategy is to use the targets that depend on the library, and run ldd
    # on it to find the real location of the library that is used
    linkpath = deps[0].link_task.outputs[0].abspath(bld.env)
    if libname == "python":
        # the python library name embeds the interpreter version
        libname += bld.env.PYTHON_VERSION
    return find_ldd_path(bld, "lib%s" % libname.lower(), linkpath)
def build_symbol_sets(bld, tgt_list):
    '''build the public_symbols and undefined_symbols attributes for each target'''
    if bld.env.public_symbols:
        # already computed for this build context
        return
    objlist = []  # list of object file
    objmap = {}   # map from object filename to target (subsystem) name
    for t in tgt_list:
        t.public_symbols = set()
        t.undefined_symbols = set()
        t.used_symbols = set()
        for tsk in getattr(t, 'compiled_tasks', []):
            for output in tsk.outputs:
                objpath = output.abspath(bld.env)
                objlist.append(objpath)
                objmap[objpath] = t
    symbols = symbols_extract(bld, objlist)
    # merge per-object symbol sets into per-target sets
    for obj in objlist:
        t = objmap[obj]
        t.public_symbols = t.public_symbols.union(symbols[obj]["PUBLIC"])
        t.undefined_symbols = t.undefined_symbols.union(symbols[obj]["UNDEFINED"])
        t.used_symbols = t.used_symbols.union(symbols[obj]["UNDEFINED"])
        # a symbol defined by another object of the same target is not 'undefined'
        t.undefined_symbols = t.undefined_symbols.difference(t.public_symbols)
    # and the reverse map of public symbols to subsystem name
    bld.env.symbol_map = {}
    for t in tgt_list:
        for s in t.public_symbols:
            if not s in bld.env.symbol_map:
                bld.env.symbol_map[s] = []
            bld.env.symbol_map[s].append(real_name(t.sname))
    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')  # NOTE(review): unused here — confirm before removing
    # per-subsystem public symbol sets; libraries inherit from their add_objects
    bld.env.public_symbols = {}
    for t in tgt_list:
        name = real_name(t.sname)
        if name in bld.env.public_symbols:
            bld.env.public_symbols[name] = bld.env.public_symbols[name].union(t.public_symbols)
        else:
            bld.env.public_symbols[name] = t.public_symbols
        if t.samba_type == 'LIBRARY':
            for dep in t.add_objects:
                t2 = bld.name_to_obj(dep, bld.env)
                bld.ASSERT(t2 is not None, "Library '%s' has unknown dependency '%s'" % (name, dep))
                bld.env.public_symbols[name] = bld.env.public_symbols[name].union(t2.public_symbols)
    # per-subsystem used (referenced) symbol sets, built the same way
    bld.env.used_symbols = {}
    for t in tgt_list:
        name = real_name(t.sname)
        if name in bld.env.used_symbols:
            bld.env.used_symbols[name] = bld.env.used_symbols[name].union(t.used_symbols)
        else:
            bld.env.used_symbols[name] = t.used_symbols
        if t.samba_type == 'LIBRARY':
            for dep in t.add_objects:
                t2 = bld.name_to_obj(dep, bld.env)
                bld.ASSERT(t2 is not None, "Library '%s' has unknown dependency '%s'" % (name, dep))
                bld.env.used_symbols[name] = bld.env.used_symbols[name].union(t2.used_symbols)
def build_library_dict(bld, tgt_list):
    '''build the library_dict dictionary'''
    if bld.env.library_dict:
        # already populated for this build context
        return
    bld.env.library_dict = {}
    linkable_types = ('LIBRARY', 'PYTHON')
    for tgt in tgt_list:
        if tgt.samba_type in linkable_types:
            real_path = os.path.realpath(tgt.link_task.outputs[0].abspath(bld.env))
            bld.env.library_dict[real_path] = tgt.sname
def build_syslib_sets(bld, tgt_list):
    '''build the public_symbols for all syslibs'''
    if bld.env.syslib_symbols:
        return
    # work out what syslibs we depend on, and what targets those are used in
    syslibs = {}
    objmap = {}
    for t in tgt_list:
        if getattr(t, 'uselib', []) and t.samba_type in [ 'LIBRARY', 'BINARY', 'PYTHON' ]:
            for lib in t.uselib:
                if lib in ['PYEMBED', 'PYEXT']:
                    # both map to the python runtime library
                    lib = "python"
                if not lib in syslibs:
                    syslibs[lib] = []
                syslibs[lib].append(t)
    # work out the paths to each syslib
    syslib_paths = []
    for lib in syslibs:
        path = find_syslib_path(bld, lib, syslibs[lib])
        if path is None:
            Logs.warn("Unable to find syslib path for %s" % lib)
        if path is not None:
            syslib_paths.append(path)
            objmap[path] = lib.lower()
    # add in libc
    # NOTE(review): assumes bld.env.libc_path was set by an earlier
    # find_ldd_path() call spotting a 'libc.' line — confirm it is always set
    syslib_paths.append(bld.env.libc_path)
    objmap[bld.env.libc_path] = 'c'
    symbols = symbols_extract(bld, syslib_paths, dynamic=True)
    # keep a map of syslib names to public symbols
    bld.env.syslib_symbols = {}
    for lib in symbols:
        bld.env.syslib_symbols[lib] = symbols[lib]["PUBLIC"]
    # add to the map of symbols to dependencies
    for lib in symbols:
        for sym in symbols[lib]["PUBLIC"]:
            if not sym in bld.env.symbol_map:
                bld.env.symbol_map[sym] = []
            bld.env.symbol_map[sym].append(objmap[lib])
    # keep the libc symbols as well, as these are useful for some of the
    # sanity checks
    bld.env.libc_symbols = symbols[bld.env.libc_path]["PUBLIC"]
    # add to the combined map of dependency name to public_symbols
    for lib in bld.env.syslib_symbols:
        bld.env.public_symbols[objmap[lib]] = bld.env.syslib_symbols[lib]
def build_autodeps(bld, t):
    '''build the set of dependencies for a target'''
    deps = set()
    name = real_name(t.sname)
    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
    for sym in t.undefined_symbols:
        if sym in t.public_symbols:
            continue
        if sym in bld.env.symbol_map:
            # depname is the list of subsystems providing this symbol
            depname = bld.env.symbol_map[sym]
            if depname == [ name ]:
                # self dependencies aren't interesting
                continue
            if t.in_library == depname:
                # no need to depend on the library we are part of
                continue
            if depname[0] in ['c', 'python']:
                # these don't go into autodeps
                continue
            if targets[depname[0]] in [ 'SYSLIB' ]:
                deps.add(depname[0])
                continue
            t2 = bld.name_to_obj(depname[0], bld.env)
            if len(t2.in_library) != 1:
                # provider is in zero or several libraries: depend on it directly
                deps.add(depname[0])
                continue
            if t2.in_library == t.in_library:
                # if we're part of the same library, we don't need to autodep
                continue
            # otherwise depend on the single library containing the provider
            deps.add(t2.in_library[0])
    t.autodeps = deps
def build_library_names(bld, tgt_list):
    '''add a in_library attribute to all targets that are part of a library'''
    if bld.env.done_build_library_names:
        # only compute once per build context
        return
    for t in tgt_list:
        t.in_library = []
    for t in tgt_list:
        if t.samba_type in [ 'LIBRARY' ]:
            for obj in t.samba_deps_extended:
                t2 = bld.name_to_obj(obj, bld.env)
                # only subsystem-style components are 'inside' a library
                if t2 and t2.samba_type in [ 'SUBSYSTEM', 'ASN1' ]:
                    if not t.sname in t2.in_library:
                        t2.in_library.append(t.sname)
    bld.env.done_build_library_names = True
def check_library_deps(bld, t):
    '''check that all the autodeps that have mutual dependency of this
    target are in the same library as the target'''
    name = real_name(t.sname)
    if len(t.in_library) > 1:
        Logs.warn("WARNING: Target '%s' in multiple libraries: %s" % (t.sname, t.in_library))
    for dep in t.autodeps:
        t2 = bld.name_to_obj(dep, bld.env)
        if t2 is None:
            continue
        # a pair that depend on each other must live in the same library
        for dep2 in t2.autodeps:
            if dep2 == name and t.in_library != t2.in_library:
                Logs.warn("WARNING: mutual dependency %s <=> %s" % (name, real_name(t2.sname)))
                Logs.warn("Libraries should match. %s != %s" % (t.in_library, t2.in_library))
                # raise Utils.WafError("illegal mutual dependency")
def check_syslib_collisions(bld, tgt_list):
    '''check if a target has any symbol collisions with a syslib

    We do not want any code in Samba to use a symbol name from a
    system library. The chance of that causing problems is just too
    high. Note that libreplace uses a rep_XX approach of renaming
    symbols via macros
    '''
    found_collision = False
    for tgt in tgt_list:
        for lib, lib_symbols in bld.env.syslib_symbols.items():
            common = tgt.public_symbols.intersection(lib_symbols)
            if not common:
                continue
            Logs.error("ERROR: Target '%s' has symbols '%s' which is also in syslib '%s'" % (tgt.sname, common, lib))
            found_collision = True
    if found_collision:
        raise Utils.WafError("symbols in common with system libraries")
def check_dependencies(bld, t):
    '''check for depenencies that should be changed'''
    if bld.name_to_obj(t.sname + ".objlist", bld.env):
        # the .objlist twin carries the real dependencies; skip this one
        return
    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
    remaining = t.undefined_symbols.copy()
    remaining = remaining.difference(t.public_symbols)
    sname = real_name(t.sname)
    deps = set(t.samba_deps)
    # check which declared dependencies actually satisfy any symbols
    for d in t.samba_deps:
        if targets[d] in [ 'EMPTY', 'DISABLED', 'SYSLIB', 'GENERATOR' ]:
            continue
        bld.ASSERT(d in bld.env.public_symbols, "Failed to find symbol list for dependency '%s'" % d)
        diff = remaining.intersection(bld.env.public_symbols[d])
        if not diff and targets[sname] != 'LIBRARY':
            Logs.info("Target '%s' has no dependency on %s" % (sname, d))
        else:
            remaining = remaining.difference(diff)
    # anything still unresolved: suggest the providing subsystem, or record
    # the symbol as unsatisfied for the later syslib pass
    t.unsatisfied_symbols = set()
    needed = {}
    for sym in remaining:
        if sym in bld.env.symbol_map:
            dep = bld.env.symbol_map[sym]
            if not dep[0] in needed:
                needed[dep[0]] = set()
            needed[dep[0]].add(sym)
        else:
            t.unsatisfied_symbols.add(sym)
    for dep in needed:
        Logs.info("Target '%s' should add dep '%s' for symbols %s" % (sname, dep, " ".join(needed[dep])))
def check_syslib_dependencies(bld, t):
    '''check for syslib depenencies'''
    if bld.name_to_obj(t.sname + ".objlist", bld.env):
        # the .objlist twin carries the real dependencies; skip this one
        return
    sname = real_name(t.sname)
    remaining = set()
    features = TO_LIST(t.features)
    if 'pyembed' in features or 'pyext' in features:
        # python extensions get their symbols from the python runtime
        if 'python' in bld.env.public_symbols:
            t.unsatisfied_symbols = t.unsatisfied_symbols.difference(bld.env.public_symbols['python'])
    needed = {}
    for sym in t.unsatisfied_symbols:
        if sym in bld.env.symbol_map:
            dep = bld.env.symbol_map[sym][0]
            if dep == 'c':
                # libc is always linked; not worth reporting
                continue
            if not dep in needed:
                needed[dep] = set()
            needed[dep].add(sym)
        else:
            remaining.add(sym)
    for dep in needed:
        Logs.info("Target '%s' should add syslib dep '%s' for symbols %s" % (sname, dep, " ".join(needed[dep])))
    if remaining:
        # NOTE(review): 'debug' is presumably provided by 'from samba_utils
        # import *' — confirm it resolves at runtime
        debug("deps: Target '%s' has unsatisfied symbols: %s" % (sname, " ".join(remaining)))
def symbols_symbolcheck(task):
    '''check the internal dependency lists'''
    bld = task.env.bld
    tgt_list = get_tgt_list(bld)
    # first compute symbol sets and library membership for all targets
    build_symbol_sets(bld, tgt_list)
    build_library_names(bld, tgt_list)
    for t in tgt_list:
        t.autodeps = set()
        if getattr(t, 'source', ''):
            build_autodeps(bld, t)
    # then run the per-target consistency checks
    for t in tgt_list:
        check_dependencies(bld, t)
    for t in tgt_list:
        check_library_deps(bld, t)
def symbols_syslibcheck(task):
    '''check the syslib dependencies'''
    bld = task.env.bld
    targets = get_tgt_list(bld)
    # compute syslib symbol sets, then run the checks over every target
    build_syslib_sets(bld, targets)
    check_syslib_collisions(bld, targets)
    for target in targets:
        check_syslib_dependencies(bld, target)
def symbols_whyneeded(task):
    """check why 'target' needs to link to 'subsystem'"""
    bld = task.env.bld
    tgt_list = get_tgt_list(bld)
    # WHYNEEDED is supplied on the command line as TARGET:DEPENDENCY
    why = Options.options.WHYNEEDED.split(":")
    if len(why) != 2:
        raise Utils.WafError("usage: WHYNEEDED=TARGET:DEPENDENCY")
    target = why[0]
    subsystem = why[1]
    build_symbol_sets(bld, tgt_list)
    build_library_names(bld, tgt_list)
    build_syslib_sets(bld, tgt_list)
    Logs.info("Checking why %s needs to link to %s" % (target, subsystem))
    if not target in bld.env.used_symbols:
        Logs.warn("unable to find target '%s' in used_symbols dict" % target)
        return
    if not subsystem in bld.env.public_symbols:
        Logs.warn("unable to find subsystem '%s' in public_symbols dict" % subsystem)
        return
    # the answer is the overlap of symbols used by target and exported by subsystem
    overlap = bld.env.used_symbols[target].intersection(bld.env.public_symbols[subsystem])
    if not overlap:
        Logs.info("target '%s' doesn't use any public symbols from '%s'" % (target, subsystem))
    else:
        Logs.info("target '%s' uses symbols %s from '%s'" % (target, overlap, subsystem))
def report_duplicate(bld, binname, sym, libs, fail_on_error):
    '''report duplicated symbols'''
    # linker-generated bookkeeping symbols are expected in every library
    if sym in ('_init', '_fini', '_edata', '_end', '__bss_start'):
        return
    # translate library paths back to subsystem names where known
    libnames = [bld.env.library_dict.get(lib, lib) for lib in libs]
    message = "%s: Symbol %s linked in multiple libraries %s" % (binname, sym, libnames)
    if fail_on_error:
        raise Utils.WafError(message)
    else:
        print(message)
def symbols_dupcheck_binary(bld, binname, fail_on_error):
    '''check for duplicated symbols in one binary'''
    # gather every library linked (transitively) into this binary
    libs = get_libs_recursive(bld, binname, set())
    symlist = symbols_extract(bld, libs, dynamic=True)
    # build symbol -> set of providing library paths
    symmap = {}
    for libpath in symlist:
        for sym in symlist[libpath]['PUBLIC']:
            if sym == '_GLOBAL_OFFSET_TABLE_':
                continue
            if not sym in symmap:
                symmap[sym] = set()
            symmap[sym].add(libpath)
    for sym in symmap:
        if len(symmap[sym]) > 1:
            for libpath in symmap[sym]:
                # only report when at least one provider is our own library
                if libpath in bld.env.library_dict:
                    report_duplicate(bld, binname, sym, symmap[sym], fail_on_error)
                    break
def symbols_dupcheck(task, fail_on_error=False):
    '''check for symbols defined in two different subsystems'''
    bld = task.env.bld
    tgt_list = get_tgt_list(bld)
    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')  # NOTE(review): unused here — confirm before removing
    build_library_dict(bld, tgt_list)
    # run the duplicate check on every built binary
    for t in tgt_list:
        if t.samba_type == 'BINARY':
            binname = os_path_relpath(t.link_task.outputs[0].abspath(bld.env), os.getcwd())
            symbols_dupcheck_binary(bld, binname, fail_on_error)
def symbols_dupcheck_fatal(task):
    '''check for symbols defined in two different subsystems (and fail if duplicates are found)'''
    # same as symbols_dupcheck, but duplicates raise WafError
    symbols_dupcheck(task, fail_on_error=True)
def SYMBOL_CHECK(bld):
    '''check our dependency lists'''
    # each check is registered as an always-run rule in its own build group
    if Options.options.SYMBOLCHECK:
        bld.SET_BUILD_GROUP('symbolcheck')
        task = bld(rule=symbols_symbolcheck, always=True, name='symbol checking')
        task.env.bld = bld
        bld.SET_BUILD_GROUP('syslibcheck')
        task = bld(rule=symbols_syslibcheck, always=True, name='syslib checking')
        task.env.bld = bld
        bld.SET_BUILD_GROUP('syslibcheck')
        task = bld(rule=symbols_dupcheck, always=True, name='symbol duplicate checking')
        task.env.bld = bld
    if Options.options.WHYNEEDED:
        bld.SET_BUILD_GROUP('syslibcheck')
        task = bld(rule=symbols_whyneeded, always=True, name='check why a dependency is needed')
        task.env.bld = bld
Build.BuildContext.SYMBOL_CHECK = SYMBOL_CHECK
def DUP_SYMBOL_CHECK(bld):
    '''check for duplicate symbols (developer mode only)'''
    # Fix: the docstring used to sit *inside* the if-body, where it was a
    # no-op string statement rather than this function's documentation.
    if Options.options.DUP_SYMBOLCHECK and bld.env.DEVELOPER:
        bld.SET_BUILD_GROUP('syslibcheck')
        task = bld(rule=symbols_dupcheck_fatal, always=True, name='symbol duplicate checking')
        task.env.bld = bld
Build.BuildContext.DUP_SYMBOL_CHECK = DUP_SYMBOL_CHECK
ntdb-1.0/buildtools/wafsamba/tests/ 0000775 0000000 0000000 00000000000 12241515307 0017404 5 ustar 00root root 0000000 0000000 ntdb-1.0/buildtools/wafsamba/tests/__init__.py 0000664 0000000 0000000 00000002240 12241515307 0021513 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 Jelmer Vernooij
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Tests for wafsamba."""
from unittest import (
TestCase,
TestLoader,
)
def test_suite():
    """Assemble the wafsamba test suite from its per-module test files."""
    short_names = [
        'abi',
        'bundled',
        'utils',
    ]
    qualified = ['wafsamba.tests.test_%s' % short for short in short_names]
    loader = TestLoader()
    combined = loader.suiteClass()
    combined.addTests(loader.loadTestsFromNames(qualified))
    return combined
ntdb-1.0/buildtools/wafsamba/tests/test_abi.py 0000664 0000000 0000000 00000010137 12241515307 0021552 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 Jelmer Vernooij
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from wafsamba.tests import TestCase
from wafsamba.samba_abi import (
abi_write_vscript,
normalise_signature,
)
from cStringIO import StringIO
class NormaliseSignatureTests(TestCase):
    """Tests for samba_abi.normalise_signature.

    The inputs are raw gdb 'print' outputs ($N = {...} 0x... forms);
    normalise_signature is expected to strip the gdb wrapping and
    canonicalise type spellings.
    """

    def test_function_simple(self):
        self.assertEquals("int (const struct GUID *, const struct GUID *)",
            normalise_signature("$2 = {int (const struct GUID *, const struct GUID *)} 0xe871 "))

    def test_maps_Bool(self):
        # Some types have different internal names
        self.assertEquals("bool (const struct GUID *)",
            normalise_signature("$1 = {_Bool (const struct GUID *)} 0xe75b "))

    def test_function_keep(self):
        # already-normalised signatures must pass through unchanged
        self.assertEquals(
            "enum ndr_err_code (struct ndr_push *, int, const union winreg_Data *)",
            normalise_signature("enum ndr_err_code (struct ndr_push *, int, const union winreg_Data *)"))

    def test_struct_constant(self):
        # struct-valued constants lose the '$N = {...}' wrapping
        self.assertEquals(
            'uuid = {time_low = 0, time_mid = 0, time_hi_and_version = 0, clock_seq = "\\000", node = "\\000\\000\\000\\000\\000"}, if_version = 0',
            normalise_signature('$239 = {uuid = {time_low = 0, time_mid = 0, time_hi_and_version = 0, clock_seq = "\\000", node = "\\000\\000\\000\\000\\000"}, if_version = 0}'))

    def test_incomplete_sequence(self):
        # Newer versions of gdb insert these incomplete sequence elements
        self.assertEquals(
            'uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2',
            normalise_signature('$244 = {uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237", , node = "\\b\\000+\\020H`"}, if_version = 2}'))
        self.assertEquals(
            'uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2',
            normalise_signature('$244 = {uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2}'))
class WriteVscriptTests(TestCase):
    """Tests for samba_abi.abi_write_vscript, which writes a GNU ld
    version script (global/local symbol sections per ABI version) to a
    file-like object."""

    def test_one(self):
        # no historic versions: everything is global in the current version
        f = StringIO()
        abi_write_vscript(f, "MYLIB", "1.0", [], {
            "old": "1.0",
            "new": "1.0"}, ["*"])
        self.assertEquals(f.getvalue(), """\
1.0 {
\tglobal:
\t\t*;
};
""")

    def test_simple(self):
        # No restrictions.
        f = StringIO()
        abi_write_vscript(f, "MYLIB", "1.0", ["0.1"], {
            "old": "0.1",
            "new": "1.0"}, ["*"])
        self.assertEquals(f.getvalue(), """\
MYLIB_0.1 {
\tglobal:
\t\told;
};
1.0 {
\tglobal:
\t\t*;
};
""")

    def test_exclude(self):
        # "!pattern" entries become local: (hidden) symbols
        f = StringIO()
        abi_write_vscript(f, "MYLIB", "1.0", [], {
            "exc_old": "0.1",
            "old": "0.1",
            "new": "1.0"}, ["!exc_*"])
        self.assertEquals(f.getvalue(), """\
1.0 {
\tglobal:
\t\t*;
\tlocal:
\t\texc_*;
};
""")

    def test_excludes_and_includes(self):
        # explicit global patterns plus local excludes; the catch-all "*"
        # is appended to local: when globals are restricted
        f = StringIO()
        abi_write_vscript(f, "MYLIB", "1.0", [], {
            "pub_foo": "1.0",
            "exc_bar": "1.0",
            "other": "1.0"
            }, ["pub_*", "!exc_*"])
        self.assertEquals(f.getvalue(), """\
1.0 {
\tglobal:
\t\tpub_*;
\tlocal:
\t\texc_*;
\t\t*;
};
""")
ntdb-1.0/buildtools/wafsamba/tests/test_bundled.py 0000664 0000000 0000000 00000001764 12241515307 0022442 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 Jelmer Vernooij
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from wafsamba.tests import TestCase
from wafsamba.samba_bundled import (
tuplize_version,
)
class TuplizeVersionTests(TestCase):
    """Behaviour checks for samba_bundled.tuplize_version."""

    def test_simple(self):
        # a dotted version string becomes a tuple of integers
        result = tuplize_version("1.2.10")
        self.assertEquals((1, 2, 10), result)
ntdb-1.0/buildtools/wafsamba/tests/test_utils.py 0000664 0000000 0000000 00000004734 12241515307 0022165 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 Jelmer Vernooij
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from wafsamba.tests import TestCase
from wafsamba.samba_utils import (
TO_LIST,
dict_concat,
subst_vars_error,
unique_list,
)
class ToListTests(TestCase):
    """Tests for samba_utils.TO_LIST, the string-to-list splitter used
    throughout the wafsamba build rules."""

    def test_none(self):
        # None maps to an empty list
        self.assertEquals([], TO_LIST(None))

    def test_already_list(self):
        # an existing list is passed through unchanged
        self.assertEquals(["foo", "bar", 1], TO_LIST(["foo", "bar", 1]))

    def test_default_delimiter(self):
        # default split is on whitespace; double quotes group words
        self.assertEquals(["foo", "bar"], TO_LIST("foo bar"))
        self.assertEquals(["foo", "bar"], TO_LIST(" foo bar "))
        self.assertEquals(["foo ", "bar"], TO_LIST(" \"foo \" bar "))

    def test_delimiter(self):
        # with an explicit delimiter, surrounding whitespace and quotes
        # are preserved in the elements
        self.assertEquals(["foo", "bar"], TO_LIST("foo,bar", ","))
        self.assertEquals([" foo", "bar "], TO_LIST(" foo,bar ", ","))
        self.assertEquals([" \" foo\"", " bar "], TO_LIST(" \" foo\", bar ", ","))
class UniqueListTests(TestCase):
    """Behaviour checks for samba_utils.unique_list."""

    def test_unique_list(self):
        # duplicates are dropped while first-seen order is kept
        result = unique_list(["foo", "bar", "foo"])
        self.assertEquals(["foo", "bar"], result)
class SubstVarsErrorTests(TestCase):
    """Behaviour checks for samba_utils.subst_vars_error."""

    def test_valid(self):
        # known ${VAR} references are substituted from the mapping
        self.assertEquals("", subst_vars_error("", {}))
        substituted = subst_vars_error("${F} bar", {"F": "FOO"})
        self.assertEquals("FOO bar", substituted)

    def test_invalid(self):
        # an unknown substitution variable raises KeyError
        self.assertRaises(KeyError, subst_vars_error, "${F}", {})
class DictConcatTests(TestCase):
    """Behaviour checks for samba_utils.dict_concat."""

    def test_empty(self):
        target = {}
        dict_concat(target, {})
        self.assertEquals({}, target)

    def test_same(self):
        # keys already present in the target must not be overwritten
        target = {"foo": "bar"}
        dict_concat(target, {"foo": "bla"})
        self.assertEquals({"foo": "bar"}, target)

    def test_simple(self):
        # new keys are merged into the target in place
        target = {"foo": "bar"}
        dict_concat(target, {"blie": "bla"})
        self.assertEquals({"foo": "bar", "blie": "bla"}, target)
ntdb-1.0/buildtools/wafsamba/tru64cc.py 0000664 0000000 0000000 00000003633 12241515307 0020113 0 ustar 00root root 0000000 0000000
# compiler definition for tru64/OSF1 cc compiler
# based on suncc.py from waf
import os, optparse
import Utils, Options, Configure
import ccroot, ar
from Configure import conftest
from compiler_cc import c_compiler
c_compiler['osf1V'] = ['gcc', 'tru64cc']
@conftest
def find_tru64cc(conf):
    '''locate the Tru64/OSF1 cc compiler and record it in conf.env.

    Honours an explicit CC from the configure env or the process
    environment before falling back to searching PATH for 'cc'.
    Calls conf.fatal() if no working compiler is found.
    '''
    v = conf.env
    cc = None
    if v['CC']: cc = v['CC']
    elif 'CC' in conf.environ: cc = conf.environ['CC']
    if not cc: cc = conf.find_program('cc', var='CC')
    if not cc: conf.fatal('tru64cc was not found')
    cc = conf.cmd_to_list(cc)
    try:
        # 'cc -V' prints version info; empty output means this is not
        # the tru64 compiler (or it is broken)
        if not Utils.cmd_output(cc + ['-V']):
            conf.fatal('tru64cc %r was not found' % cc)
    except ValueError:
        conf.fatal('tru64cc -V could not be executed')
    v['CC'] = cc
    v['CC_NAME'] = 'tru64'
@conftest
def tru64cc_common_flags(conf):
    '''set the command-line flag templates waf uses when driving the
    tru64 cc compiler (compile, link, library and path options)'''
    v = conf.env
    v['CC_SRC_F'] = ''
    v['CC_TGT_F'] = ['-c', '-o', '']
    v['CPPPATH_ST'] = '-I%s' # template for adding include paths
    # linker
    if not v['LINK_CC']: v['LINK_CC'] = v['CC']
    v['CCLNK_SRC_F'] = ''
    v['CCLNK_TGT_F'] = ['-o', '']
    v['LIB_ST'] = '-l%s' # template for adding libs
    v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
    v['STATICLIB_ST'] = '-l%s'
    v['STATICLIBPATH_ST'] = '-L%s'
    v['CCDEFINES_ST'] = '-D%s'
    # v['SONAME_ST'] = '-Wl,-h -Wl,%s'
    # v['SHLIB_MARKER'] = '-Bdynamic'
    # v['STATICLIB_MARKER'] = '-Bstatic'
    # program
    v['program_PATTERN'] = '%s'
    # shared library
    # v['shlib_CCFLAGS'] = ['-Kpic', '-DPIC']
    v['shlib_LINKFLAGS'] = ['-shared']
    v['shlib_PATTERN'] = 'lib%s.so'
    # static lib
    # v['staticlib_LINKFLAGS'] = ['-Bstatic']
    # v['staticlib_PATTERN'] = 'lib%s.a'
# ordered list of configuration steps that waf runs when this compiler
# tool is loaded via check_tool(); parsed by waf's Configure machinery,
# so the names inside the string must match the @conftest functions
detect = '''
find_tru64cc
find_cpp
find_ar
tru64cc_common_flags
cc_load_tools
cc_add_flags
link_add_flags
'''
ntdb-1.0/buildtools/wafsamba/wafsamba.py 0000664 0000000 0000000 00000075406 12241515307 0020411 0 ustar 00root root 0000000 0000000 # a waf tool to add autoconf-like macros to the configure section
# and for SAMBA_ macros for building libraries, binaries etc
import Build, os, sys, Options, Task, Utils, cc, TaskGen, fnmatch, re, shutil, Logs, Constants
from Configure import conf
from Logs import debug
from samba_utils import SUBST_VARS_RECURSIVE
TaskGen.task_gen.apply_verif = Utils.nada
# bring in the other samba modules
from samba_optimisation import *
from samba_utils import *
from samba_version import *
from samba_autoconf import *
from samba_patterns import *
from samba_pidl import *
from samba_autoproto import *
from samba_python import *
from samba_deps import *
from samba_bundled import *
import samba_install
import samba_conftests
import samba_abi
import samba_headers
import tru64cc
import irixcc
import hpuxcc
import generic_cc
import samba_dist
import samba_wildcard
import stale_files
import symbols
import pkgconfig
import configure_file
# some systems have broken threading in python
if os.environ.get('WAF_NOTHREADS') == '1':
    import nothreads

# subdirectory of the build directory where shared libraries are placed
LIB_PATH="shared"

os.environ['PYTHONUNBUFFERED'] = '1'

# these build rules depend on the waf version bundled with Samba
# (>= 1.5.19, HEXVERSION 0x105019); refuse to run under a system waf
if Constants.HEXVERSION < 0x105019:
    Logs.error('''
Please use the version of waf that comes with Samba, not
a system installed version. See http://wiki.samba.org/index.php/Waf
for details.
Alternatively, please run ./configure and make as usual. That will
call the right version of waf.''')
    sys.exit(1)
@conf
def SAMBA_BUILD_ENV(conf):
    '''create the samba build environment

    Creates the build-directory layout (shared/, shared/private/,
    modules/, python/samba/dcerpc/), the bin/default compatibility
    symlinks, and the pidl blib directories used by perl.
    '''
    conf.env.BUILD_DIRECTORY = conf.blddir
    mkdir_p(os.path.join(conf.blddir, LIB_PATH))
    mkdir_p(os.path.join(conf.blddir, LIB_PATH, "private"))
    mkdir_p(os.path.join(conf.blddir, "modules"))
    mkdir_p(os.path.join(conf.blddir, 'python/samba/dcerpc'))
    # this allows all of the bin/shared and bin/python targets
    # to be expressed in terms of build directory paths
    mkdir_p(os.path.join(conf.blddir, 'default'))
    for (source, target) in [('shared', 'shared'), ('modules', 'modules'), ('python', 'python_modules')]:
        link_target = os.path.join(conf.blddir, 'default/' + target)
        if not os.path.lexists(link_target):
            os.symlink('../' + source, link_target)
    # get perl to put the blib files in the build directory
    blib_bld = os.path.join(conf.blddir, 'default/pidl/blib')
    blib_src = os.path.join(conf.srcdir, 'pidl/blib')
    mkdir_p(blib_bld + '/man1')
    mkdir_p(blib_bld + '/man3')
    # a stale blib in the source tree would confuse perl; remove it
    if os.path.islink(blib_src):
        os.unlink(blib_src)
    elif os.path.exists(blib_src):
        shutil.rmtree(blib_src)
def ADD_INIT_FUNCTION(bld, subsystem, target, init_function):
    '''add an init_function to the list for a subsystem

    bld           : the waf build context
    subsystem     : subsystem the module belongs to; must not be None
                    when an init_function is supplied
    target        : the target (module) that provides the function
    init_function : name of the init function, or None to do nothing
    '''
    if init_function is None:
        return
    bld.ASSERT(subsystem is not None, "You must specify a subsystem for init_function '%s'" % init_function)
    cache = LOCAL_CACHE(bld, 'INIT_FUNCTIONS')
    # idiomatic membership test ("x not in y" rather than "not x in y")
    if subsystem not in cache:
        cache[subsystem] = []
    cache[subsystem].append( { 'TARGET':target, 'INIT_FUNCTION':init_function } )
Build.BuildContext.ADD_INIT_FUNCTION = ADD_INIT_FUNCTION
#################################################################
def SAMBA_LIBRARY(bld, libname, source,
                  deps='',
                  public_deps='',
                  includes='',
                  public_headers=None,
                  public_headers_install=True,
                  header_path=None,
                  pc_files=None,
                  vnum=None,
                  soname=None,
                  cflags='',
                  ldflags='',
                  external_library=False,
                  realname=None,
                  autoproto=None,
                  autoproto_extra_source='',
                  group='main',
                  depends_on='',
                  local_include=True,
                  global_include=True,
                  vars=None,
                  subdir=None,
                  install_path=None,
                  install=True,
                  pyembed=False,
                  pyext=False,
                  target_type='LIBRARY',
                  bundled_extension=True,
                  link_name=None,
                  abi_directory=None,
                  abi_match=None,
                  hide_symbols=False,
                  manpages=None,
                  private_library=False,
                  grouping_library=False,
                  allow_undefined_symbols=False,
                  enabled=True):
    '''define a Samba library

    Creates an '<libname>.objlist' subsystem for the object files plus
    a separate link target, so sources are compiled only once for the
    build and install variants. Optionally attaches an ld version
    script (ABI control), pkg-config files, public headers and
    manpages. Public libraries must supply vnum/soname, pc_files and
    public_headers.
    '''
    if LIB_MUST_BE_PRIVATE(bld, libname):
        private_library=True
    if not enabled:
        SET_TARGET_TYPE(bld, libname, 'DISABLED')
        return
    source = bld.EXPAND_VARIABLES(source, vars=vars)
    if subdir:
        source = bld.SUBDIR(subdir, source)
    # remember empty libraries, so we can strip the dependencies
    if ((source == '') or (source == [])) and deps == '' and public_deps == '':
        SET_TARGET_TYPE(bld, libname, 'EMPTY')
        return
    if BUILTIN_LIBRARY(bld, libname):
        obj_target = libname
    else:
        obj_target = libname + '.objlist'
    if group == 'libraries':
        subsystem_group = 'main'
    else:
        subsystem_group = group
    # first create a target for building the object files for this library
    # by separating in this way, we avoid recompiling the C files
    # separately for the install library and the build library
    bld.SAMBA_SUBSYSTEM(obj_target,
                        source = source,
                        deps = deps,
                        public_deps = public_deps,
                        includes = includes,
                        public_headers = public_headers,
                        public_headers_install = public_headers_install,
                        header_path = header_path,
                        cflags = cflags,
                        group = subsystem_group,
                        autoproto = autoproto,
                        autoproto_extra_source=autoproto_extra_source,
                        depends_on = depends_on,
                        hide_symbols = hide_symbols,
                        pyembed = pyembed,
                        pyext = pyext,
                        local_include = local_include,
                        global_include = global_include)
    if BUILTIN_LIBRARY(bld, libname):
        # builtin libraries are linked straight into binaries; no
        # separate link target is needed
        return
    if not SET_TARGET_TYPE(bld, libname, target_type):
        return
    # the library itself will depend on that object target
    deps += ' ' + public_deps
    deps = TO_LIST(deps)
    deps.append(obj_target)
    realname = bld.map_shlib_extension(realname, python=(target_type=='PYTHON'))
    link_name = bld.map_shlib_extension(link_name, python=(target_type=='PYTHON'))
    # we don't want any public libraries without version numbers
    if (not private_library and target_type != 'PYTHON' and not realname):
        if vnum is None and soname is None:
            raise Utils.WafError("public library '%s' must have a vnum" %
                    libname)
        if pc_files is None:
            raise Utils.WafError("public library '%s' must have pkg-config file" %
                       libname)
        if public_headers is None:
            raise Utils.WafError("public library '%s' must have header files" %
                       libname)
    # public libraries use a dash-separated name; private ones get the
    # private extension added to keep them distinct from system libs
    if target_type == 'PYTHON' or realname or not private_library:
        bundled_name = libname.replace('_', '-')
    else:
        bundled_name = PRIVATE_NAME(bld, libname, bundled_extension,
            private_library)
    ldflags = TO_LIST(ldflags)
    features = 'cc cshlib symlink_lib install_lib'
    if pyext:
        features += ' pyext'
    if pyembed:
        features += ' pyembed'
    if abi_directory:
        features += ' abi_check'
    vscript = None
    if bld.env.HAVE_LD_VERSION_SCRIPT:
        if private_library:
            version = "%s_%s" % (Utils.g_module.APPNAME, Utils.g_module.VERSION)
        elif vnum:
            version = "%s_%s" % (libname, vnum)
        else:
            version = None
        if version:
            vscript = "%s.vscript" % libname
            bld.ABI_VSCRIPT(libname, abi_directory, version, vscript,
                            abi_match)
            fullname = apply_pattern(bundled_name, bld.env.shlib_PATTERN)
            fullpath = bld.path.find_or_declare(fullname)
            vscriptpath = bld.path.find_or_declare(vscript)
            if not fullpath:
                raise Utils.WafError("unable to find fullpath for %s" % fullname)
            if not vscriptpath:
                raise Utils.WafError("unable to find vscript path for %s" % vscript)
            # relink the library whenever the version script changes
            bld.add_manual_dependency(fullpath, vscriptpath)
            if Options.is_install:
                # also make the .inst file depend on the vscript
                instname = apply_pattern(bundled_name + '.inst', bld.env.shlib_PATTERN)
                bld.add_manual_dependency(bld.path.find_or_declare(instname), bld.path.find_or_declare(vscript))
            vscript = os.path.join(bld.path.abspath(bld.env), vscript)
    bld.SET_BUILD_GROUP(group)
    t = bld(
        features        = features,
        source          = [],
        target          = bundled_name,
        depends_on      = depends_on,
        samba_ldflags   = ldflags,
        samba_deps      = deps,
        samba_includes  = includes,
        version_script  = vscript,
        local_include   = local_include,
        global_include  = global_include,
        vnum            = vnum,
        soname          = soname,
        install_path    = None,
        samba_inst_path = install_path,
        name            = libname,
        samba_realname  = realname,
        samba_install   = install,
        abi_directory   = "%s/%s" % (bld.path.abspath(), abi_directory),
        abi_match       = abi_match,
        private_library = private_library,
        grouping_library=grouping_library,
        allow_undefined_symbols=allow_undefined_symbols
        )
    if realname and not link_name:
        link_name = 'shared/%s' % realname
    if link_name:
        t.link_name = link_name
    if pc_files is not None and not private_library:
        bld.PKG_CONFIG_FILES(pc_files, vnum=vnum)
    if (manpages is not None and 'XSLTPROC_MANPAGES' in bld.env and
        bld.env['XSLTPROC_MANPAGES']):
        bld.MANPAGES(manpages, install)
Build.BuildContext.SAMBA_LIBRARY = SAMBA_LIBRARY
#################################################################
def SAMBA_BINARY(bld, binname, source,
                 deps='',
                 includes='',
                 public_headers=None,
                 header_path=None,
                 modules=None,
                 ldflags=None,
                 cflags='',
                 autoproto=None,
                 use_hostcc=False,
                 use_global_deps=True,
                 compiler=None,
                 group='main',
                 manpages=None,
                 local_include=True,
                 global_include=True,
                 subsystem_name=None,
                 pyembed=False,
                 vars=None,
                 subdir=None,
                 install=True,
                 install_path=None,
                 enabled=True):
    '''define a Samba binary

    As with SAMBA_LIBRARY, the object files are built by a separate
    '<binname>.objlist' subsystem so they are compiled only once.
    PIE/RELRO hardening flags are added when enabled in the configure
    environment.
    '''
    if not enabled:
        SET_TARGET_TYPE(bld, binname, 'DISABLED')
        return
    if not SET_TARGET_TYPE(bld, binname, 'BINARY'):
        return
    features = 'cc cprogram symlink_bin install_bin'
    if pyembed:
        features += ' pyembed'
    obj_target = binname + '.objlist'
    source = bld.EXPAND_VARIABLES(source, vars=vars)
    if subdir:
        source = bld.SUBDIR(subdir, source)
    source = unique_list(TO_LIST(source))
    if group == 'binaries':
        subsystem_group = 'main'
    else:
        subsystem_group = group
    # only specify PIE flags for binaries
    pie_cflags = cflags
    pie_ldflags = TO_LIST(ldflags)
    if bld.env['ENABLE_PIE'] == True:
        pie_cflags += ' -fPIE'
        pie_ldflags.extend(TO_LIST('-pie'))
    if bld.env['ENABLE_RELRO'] == True:
        pie_ldflags.extend(TO_LIST('-Wl,-z,relro,-z,now'))
    # first create a target for building the object files for this binary
    # by separating in this way, we avoid recompiling the C files
    # separately for the install binary and the build binary
    bld.SAMBA_SUBSYSTEM(obj_target,
                        source = source,
                        deps = deps,
                        includes = includes,
                        cflags = pie_cflags,
                        group = subsystem_group,
                        autoproto = autoproto,
                        subsystem_name = subsystem_name,
                        local_include = local_include,
                        global_include = global_include,
                        use_hostcc = use_hostcc,
                        pyext = pyembed,
                        use_global_deps= use_global_deps)
    bld.SET_BUILD_GROUP(group)
    # the binary itself will depend on that object target
    deps = TO_LIST(deps)
    deps.append(obj_target)
    t = bld(
        features       = features,
        source         = [],
        target         = binname,
        samba_deps     = deps,
        samba_includes = includes,
        local_include  = local_include,
        global_include = global_include,
        samba_modules  = modules,
        top            = True,
        samba_subsystem= subsystem_name,
        install_path   = None,
        samba_inst_path= install_path,
        samba_install  = install,
        samba_ldflags  = pie_ldflags
        )
    if manpages is not None and 'XSLTPROC_MANPAGES' in bld.env and bld.env['XSLTPROC_MANPAGES']:
        bld.MANPAGES(manpages, install)
Build.BuildContext.SAMBA_BINARY = SAMBA_BINARY
#################################################################
def SAMBA_MODULE(bld, modname, source,
                 deps='',
                 includes='',
                 subsystem=None,
                 init_function=None,
                 module_init_name='samba_init_module',
                 autoproto=None,
                 autoproto_extra_source='',
                 cflags='',
                 internal_module=True,
                 local_include=True,
                 global_include=True,
                 vars=None,
                 subdir=None,
                 enabled=True,
                 pyembed=False,
                 manpages=None,
                 allow_undefined_symbols=False
                 ):
    '''define a Samba module.

    Internal modules are built into their parent subsystem; external
    ones become shared libraries installed under ${MODULESDIR} with
    the subsystem prefix stripped from the on-disk name.
    '''
    source = bld.EXPAND_VARIABLES(source, vars=vars)
    if subdir:
        source = bld.SUBDIR(subdir, source)
    if internal_module or BUILTIN_LIBRARY(bld, modname):
        # Do not create modules for disabled subsystems
        if subsystem and GET_TARGET_TYPE(bld, subsystem) == 'DISABLED':
            return
        bld.SAMBA_SUBSYSTEM(modname, source,
                            deps=deps,
                            includes=includes,
                            autoproto=autoproto,
                            autoproto_extra_source=autoproto_extra_source,
                            cflags=cflags,
                            local_include=local_include,
                            global_include=global_include,
                            enabled=enabled)
        bld.ADD_INIT_FUNCTION(subsystem, modname, init_function)
        return
    if not enabled:
        SET_TARGET_TYPE(bld, modname, 'DISABLED')
        return
    # Do not create modules for disabled subsystems
    if subsystem and GET_TARGET_TYPE(bld, subsystem) == 'DISABLED':
        return
    obj_target = modname + '.objlist'
    realname = modname
    if subsystem is not None:
        deps += ' ' + subsystem
        # strip the subsystem prefix from the module's file name
        while realname.startswith("lib"+subsystem+"_"):
            realname = realname[len("lib"+subsystem+"_"):]
        while realname.startswith(subsystem+"_"):
            realname = realname[len(subsystem+"_"):]
    realname = bld.make_libname(realname)
    while realname.startswith("lib"):
        realname = realname[len("lib"):]
    build_link_name = "modules/%s/%s" % (subsystem, realname)
    if init_function:
        # rename the module's init function to the common module entry name
        cflags += " -D%s=%s" % (init_function, module_init_name)
    bld.SAMBA_LIBRARY(modname,
                      source,
                      deps=deps,
                      includes=includes,
                      cflags=cflags,
                      realname = realname,
                      autoproto = autoproto,
                      local_include=local_include,
                      global_include=global_include,
                      vars=vars,
                      link_name=build_link_name,
                      install_path="${MODULESDIR}/%s" % subsystem,
                      pyembed=pyembed,
                      manpages=manpages,
                      allow_undefined_symbols=allow_undefined_symbols
                      )
Build.BuildContext.SAMBA_MODULE = SAMBA_MODULE
#################################################################
def SAMBA_SUBSYSTEM(bld, modname, source,
                    deps='',
                    public_deps='',
                    includes='',
                    public_headers=None,
                    public_headers_install=True,
                    header_path=None,
                    cflags='',
                    cflags_end=None,
                    group='main',
                    init_function_sentinel=None,
                    autoproto=None,
                    autoproto_extra_source='',
                    depends_on='',
                    local_include=True,
                    local_include_first=True,
                    global_include=True,
                    subsystem_name=None,
                    enabled=True,
                    use_hostcc=False,
                    use_global_deps=True,
                    vars=None,
                    subdir=None,
                    hide_symbols=False,
                    pyext=False,
                    pyembed=False):
    '''define a Samba subsystem

    A subsystem is a collection of object files that libraries and
    binaries link against; nothing is linked here. Returns the created
    task generator, or None when the subsystem is disabled or empty.
    '''
    if not enabled:
        SET_TARGET_TYPE(bld, modname, 'DISABLED')
        return
    # remember empty subsystems, so we can strip the dependencies
    if ((source == '') or (source == [])) and deps == '' and public_deps == '':
        SET_TARGET_TYPE(bld, modname, 'EMPTY')
        return
    if not SET_TARGET_TYPE(bld, modname, 'SUBSYSTEM'):
        return
    source = bld.EXPAND_VARIABLES(source, vars=vars)
    if subdir:
        source = bld.SUBDIR(subdir, source)
    source = unique_list(TO_LIST(source))
    deps += ' ' + public_deps
    bld.SET_BUILD_GROUP(group)
    features = 'cc'
    if pyext:
        features += ' pyext'
    if pyembed:
        features += ' pyembed'
    t = bld(
        features       = features,
        source         = source,
        target         = modname,
        samba_cflags   = CURRENT_CFLAGS(bld, modname, cflags, hide_symbols=hide_symbols),
        depends_on     = depends_on,
        samba_deps     = TO_LIST(deps),
        samba_includes = includes,
        local_include  = local_include,
        local_include_first = local_include_first,
        global_include = global_include,
        samba_subsystem= subsystem_name,
        samba_use_hostcc = use_hostcc,
        samba_use_global_deps = use_global_deps,
        )
    if cflags_end is not None:
        # cflags_end is appended after the computed flags so it can
        # override them
        t.samba_cflags.extend(TO_LIST(cflags_end))
    if autoproto is not None:
        bld.SAMBA_AUTOPROTO(autoproto, source + TO_LIST(autoproto_extra_source))
    if public_headers is not None:
        bld.PUBLIC_HEADERS(public_headers, header_path=header_path,
                           public_headers_install=public_headers_install)
    return t
Build.BuildContext.SAMBA_SUBSYSTEM = SAMBA_SUBSYSTEM
def SAMBA_GENERATOR(bld, name, rule, source='', target='',
                    group='generators', enabled=True,
                    public_headers=None,
                    public_headers_install=True,
                    header_path=None,
                    vars=None,
                    always=False):
    '''A generic source generator target

    rule may be a shell-command string or a python callable. When
    vars is a dict or list, its keys/items are added to the task's
    dependency variables so the target is rebuilt when they change.
    Returns the created task generator.
    '''
    if not SET_TARGET_TYPE(bld, name, 'GENERATOR'):
        return
    if not enabled:
        return
    dep_vars = []
    if isinstance(vars, dict):
        dep_vars = vars.keys()
    elif isinstance(vars, list):
        dep_vars = vars
    bld.SET_BUILD_GROUP(group)
    t = bld(
        rule=rule,
        source=bld.EXPAND_VARIABLES(source, vars=vars),
        target=target,
        # string rules run through the shell; callables run in-process
        shell=isinstance(rule, str),
        on_results=True,
        before='cc',
        ext_out='.c',
        samba_type='GENERATOR',
        # include the rule itself so changing the command re-runs it
        dep_vars = [rule] + dep_vars,
        name=name)
    if always:
        t.always = True
    if public_headers is not None:
        bld.PUBLIC_HEADERS(public_headers, header_path=header_path,
                           public_headers_install=public_headers_install)
    return t
Build.BuildContext.SAMBA_GENERATOR = SAMBA_GENERATOR
@runonce
def SETUP_BUILD_GROUPS(bld):
    '''setup build groups used to ensure that the different build
    phases happen consecutively

    NOTE: the registration order below defines the order in which the
    build phases run — do not reorder these calls.
    '''
    bld.p_ln = bld.srcnode # we do want to see all targets!
    bld.env['USING_BUILD_GROUPS'] = True
    bld.add_group('setup')
    bld.add_group('build_compiler_source')
    bld.add_group('vscripts')
    bld.add_group('base_libraries')
    bld.add_group('generators')
    bld.add_group('compiler_prototypes')
    bld.add_group('compiler_libraries')
    bld.add_group('build_compilers')
    bld.add_group('build_source')
    bld.add_group('prototypes')
    bld.add_group('headers')
    bld.add_group('main')
    bld.add_group('symbolcheck')
    bld.add_group('syslibcheck')
    bld.add_group('final')
Build.BuildContext.SETUP_BUILD_GROUPS = SETUP_BUILD_GROUPS
def SET_BUILD_GROUP(bld, group):
    '''set the current build group

    This is a no-op unless SETUP_BUILD_GROUPS has been run on this
    build context (it sets the USING_BUILD_GROUPS flag in bld.env).
    '''
    # idiomatic membership test ("x not in y" rather than "not x in y")
    if 'USING_BUILD_GROUPS' not in bld.env:
        return
    bld.set_group(group)
Build.BuildContext.SET_BUILD_GROUP = SET_BUILD_GROUP
@conf
def ENABLE_TIMESTAMP_DEPENDENCIES(conf):
    """use timestamps instead of file contents for deps
    this currently doesn't work"""
    def h_file(filename):
        # replacement for waf's content hash: hash mtime + size + name
        # instead of reading the whole file
        import stat
        st = os.stat(filename)
        if stat.S_ISDIR(st[stat.ST_MODE]): raise IOError('not a file')
        m = Utils.md5()
        m.update(str(st.st_mtime))
        m.update(str(st.st_size))
        m.update(filename)
        return m.digest()
    # monkey-patch waf's file hashing globally
    Utils.h_file = h_file
def SAMBA_SCRIPT(bld, name, pattern, installdir, installname=None):
    '''used to copy scripts from the source tree into the build directory
    for use by selftest

    Each file matching pattern is symlinked into installdir; if
    installname is given it overrides the link's basename.
    '''
    source = bld.path.ant_glob(pattern)
    bld.SET_BUILD_GROUP('build_source')
    for s in TO_LIST(source):
        iname = s
        if installname is not None:
            iname = installname
        target = os.path.join(installdir, iname)
        tgtdir = os.path.dirname(os.path.join(bld.srcnode.abspath(bld.env), '..', target))
        mkdir_p(tgtdir)
        link_src = os.path.normpath(os.path.join(bld.curdir, s))
        link_dst = os.path.join(tgtdir, os.path.basename(iname))
        # skip if the correct symlink is already in place
        if os.path.islink(link_dst) and os.readlink(link_dst) == link_src:
            continue
        if os.path.exists(link_dst):
            os.unlink(link_dst)
        Logs.info("symlink: %s -> %s/%s" % (s, installdir, iname))
        os.symlink(link_src, link_dst)
Build.BuildContext.SAMBA_SCRIPT = SAMBA_SCRIPT
def copy_and_fix_python_path(task):
    # waf task rule: copy a python script into the build tree, rewriting
    # the 'sys.path.insert(0, "bin/python")' line to point at the
    # configured python install dirs and replacing the shebang with the
    # configured interpreter. Always returns 0 (task success).
    pattern='sys.path.insert(0, "bin/python")'
    if task.env["PYTHONARCHDIR"] in sys.path and task.env["PYTHONDIR"] in sys.path:
        # the install dirs are already on the default path; drop the line
        replacement = ""
    elif task.env["PYTHONARCHDIR"] == task.env["PYTHONDIR"]:
        replacement="""sys.path.insert(0, "%s")""" % task.env["PYTHONDIR"]
    else:
        replacement="""sys.path.insert(0, "%s")
sys.path.insert(1, "%s")""" % (task.env["PYTHONARCHDIR"], task.env["PYTHONDIR"])
    shebang = None
    if task.env["PYTHON"][0] == "/":
        replacement_shebang = "#!%s\n" % task.env["PYTHON"]
    else:
        replacement_shebang = "#!/usr/bin/env %s\n" % task.env["PYTHON"]
    installed_location=task.outputs[0].bldpath(task.env)
    source_file = open(task.inputs[0].srcpath(task.env))
    installed_file = open(installed_location, 'w')
    lineno = 0
    for line in source_file:
        newline = line
        # only rewrite a shebang on the very first line
        if lineno == 0 and task.env["PYTHON_SPECIFIED"] == True and line[:2] == "#!":
            newline = replacement_shebang
        elif pattern in line:
            newline = line.replace(pattern, replacement)
        installed_file.write(newline)
        lineno = lineno + 1
    installed_file.close()
    os.chmod(installed_location, 0755)
    return 0
def install_file(bld, destdir, file, chmod=MODE_644, flat=False,
                 python_fixup=False, destname=None, base_name=None):
    '''install a file

    destdir is expanded for build variables. With flat=True only the
    basename is kept. With python_fixup=True a '.inst' copy with
    corrected python paths is generated and installed instead.
    '''
    destdir = bld.EXPAND_VARIABLES(destdir)
    if not destname:
        destname = file
    if flat:
        destname = os.path.basename(destname)
    dest = os.path.join(destdir, destname)
    if python_fixup:
        # fixup the python path it will use to find Samba modules
        inst_file = file + '.inst'
        bld.SAMBA_GENERATOR('python_%s' % destname,
                            rule=copy_and_fix_python_path,
                            source=file,
                            target=inst_file)
        # regenerate the .inst file when any of these config values change
        bld.add_manual_dependency(bld.path.find_or_declare(inst_file), bld.env["PYTHONARCHDIR"])
        bld.add_manual_dependency(bld.path.find_or_declare(inst_file), bld.env["PYTHONDIR"])
        bld.add_manual_dependency(bld.path.find_or_declare(inst_file), str(bld.env["PYTHON_SPECIFIED"]))
        bld.add_manual_dependency(bld.path.find_or_declare(inst_file), bld.env["PYTHON"])
        file = inst_file
    if base_name:
        file = os.path.join(base_name, file)
    bld.install_as(dest, file, chmod=chmod)
def INSTALL_FILES(bld, destdir, files, chmod=MODE_644, flat=False,
                  python_fixup=False, destname=None, base_name=None):
    '''install a set of files

    Each entry in files (string or list, split by TO_LIST) is passed
    to install_file with the same options.
    '''
    for one_file in TO_LIST(files):
        install_file(bld, destdir, one_file, chmod=chmod, flat=flat,
                     python_fixup=python_fixup, destname=destname,
                     base_name=base_name)
Build.BuildContext.INSTALL_FILES = INSTALL_FILES
def INSTALL_WILDCARD(bld, destdir, pattern, chmod=MODE_644, flat=False,
                     python_fixup=False, exclude=None, trim_path=None):
    '''install a set of files matching a wildcard pattern

    exclude is a single fnmatch pattern of files to skip; trim_path is
    a prefix removed from each file's path before installing.
    '''
    files=TO_LIST(bld.path.ant_glob(pattern))
    if trim_path:
        # make the file paths relative to trim_path
        files2 = []
        for f in files:
            files2.append(os_path_relpath(f, trim_path))
        files = files2
    if exclude:
        # iterate over a copy so removing entries is safe
        for f in files[:]:
            if fnmatch.fnmatch(f, exclude):
                files.remove(f)
    INSTALL_FILES(bld, destdir, files, chmod=chmod, flat=flat,
                  python_fixup=python_fixup, base_name=trim_path)
Build.BuildContext.INSTALL_WILDCARD = INSTALL_WILDCARD
def INSTALL_DIRS(bld, destdir, dirs):
    '''install a set of directories

    Both destdir and dirs may contain build variables, which are
    expanded before the directories are registered for install.
    '''
    base = bld.EXPAND_VARIABLES(destdir)
    expanded_dirs = bld.EXPAND_VARIABLES(dirs)
    for one_dir in TO_LIST(expanded_dirs):
        bld.install_dir(os.path.join(base, one_dir))
Build.BuildContext.INSTALL_DIRS = INSTALL_DIRS
def MANPAGES(bld, manpages, install):
    '''build and install manual pages

    manpages is a whitespace-separated list of page names such as
    'foo.1'; each is generated from '<name>.xml' with xsltproc and,
    when install is True, installed into the man section taken from
    the page name's last character.
    '''
    bld.env.MAN_XSL = 'http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl'
    for m in manpages.split():
        source = m + '.xml'
        bld.SAMBA_GENERATOR(m,
                            source=source,
                            target=m,
                            group='final',
                            rule='${XSLTPROC} --xinclude -o ${TGT} --nonet ${MAN_XSL} ${SRC}'
                            )
        if install:
            # the section number is the last character of the page name
            bld.INSTALL_FILES('${MANDIR}/man%s' % m[-1], m, flat=True)
Build.BuildContext.MANPAGES = MANPAGES
def SAMBAMANPAGES(bld, manpages):
    '''build and install manual pages

    Like MANPAGES but runs the Samba docs-xml expansion stylesheet
    first and always installs the result.
    '''
    bld.env.SAMBA_EXPAND_XSL = bld.srcnode.abspath() + '/docs-xml/xslt/expand-sambadoc.xsl'
    bld.env.SAMBA_MAN_XSL = bld.srcnode.abspath() + '/docs-xml/xslt/man.xsl'
    bld.env.SAMBA_CATALOGS = 'file:///etc/xml/catalog file:///usr/local/share/xml/catalog file://' + bld.srcnode.abspath() + '/bin/default/docs-xml/build/catalog.xml'
    for m in manpages.split():
        source = m + '.xml'
        bld.SAMBA_GENERATOR(m,
                            source=source,
                            target=m,
                            group='final',
                            rule='''XML_CATALOG_FILES="${SAMBA_CATALOGS}"
export XML_CATALOG_FILES
${XSLTPROC} --xinclude --stringparam noreference 0 -o ${TGT}.xml --nonet ${SAMBA_EXPAND_XSL} ${SRC}
${XSLTPROC} --nonet -o ${TGT} ${SAMBA_MAN_XSL} ${TGT}.xml'''
                            )
        bld.INSTALL_FILES('${MANDIR}/man%s' % m[-1], m, flat=True)
Build.BuildContext.SAMBAMANPAGES = SAMBAMANPAGES
#############################################################
# give a nicer display when building different types of files
def progress_display(self, msg, fname):
    '''format a '[current/total] Action filename' progress line for a
    task, with the filename wrapped in the task's display colour'''
    done, total = self.position[0], self.position[1]
    width = len(str(total))
    colour_on = Logs.colors(self.color)
    colour_off = Logs.colors.NORMAL
    # pad the counters to the width of the total so columns line up
    template = '[%%%dd/%%%dd] %s %%s%%s%%s\n' % (width, width, msg)
    return template % (done, total, colour_on, fname, colour_off)
def link_display(self):
    '''task display hook showing 'Linking <target>' for link tasks'''
    if Options.options.progress_bar != 0:
        # progress-bar mode: fall back to waf's stock display
        return Task.Task.old_display(self)
    fname = self.outputs[0].bldpath(self.env)
    return progress_display(self, 'Linking', fname)
Task.TaskBase.classes['cc_link'].display = link_display
def samba_display(self):
    '''generic task display hook: show friendly messages such as
    'Compiling foo.c' or 'Generating bar' based on the registered
    target type or the source file extension, falling back to waf's
    default display otherwise'''
    if Options.options.progress_bar != 0:
        return Task.Task.old_display(self)
    targets = LOCAL_CACHE(self, 'TARGET_TYPE')
    if self.name in targets:
        target_type = targets[self.name]
        type_map = { 'GENERATOR' : 'Generating',
                     'PROTOTYPE' : 'Generating'
                     }
        if target_type in type_map:
            return progress_display(self, type_map[target_type], self.name)
    if len(self.inputs) == 0:
        return Task.Task.old_display(self)
    fname = self.inputs[0].bldpath(self.env)
    # strip a leading '../' from build-relative paths
    if fname[0:3] == '../':
        fname = fname[3:]
    ext_loc = fname.rfind('.')
    if ext_loc == -1:
        return Task.Task.old_display(self)
    ext = fname[ext_loc:]
    ext_map = { '.idl' : 'Compiling IDL',
                '.et' : 'Compiling ERRTABLE',
                '.asn1': 'Compiling ASN1',
                '.c' : 'Compiling' }
    if ext in ext_map:
        return progress_display(self, ext_map[ext], fname)
    return Task.Task.old_display(self)
# keep waf's original display reachable for the fallback paths above
Task.TaskBase.classes['Task'].old_display = Task.TaskBase.classes['Task'].display
Task.TaskBase.classes['Task'].display = samba_display
@after('apply_link')
@feature('cshlib')
def apply_bundle_remove_dynamiclib_patch(self):
    '''for Mac OS X bundle targets without a version number, remove the
    -dynamiclib/-single_module link flags that waf adds for shared
    libraries (they conflict with bundle linking)'''
    if self.env['MACBUNDLE'] or getattr(self,'mac_bundle',False):
        if not getattr(self,'vnum',None):
            try:
                self.env['LINKFLAGS'].remove('-dynamiclib')
                self.env['LINKFLAGS'].remove('-single_module')
            except ValueError:
                # flags not present; nothing to strip
                pass
ntdb-1.0/buildtools/wafsamba/wscript 0000775 0000000 0000000 00000046173 12241515307 0017676 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# this is a base set of waf rules that everything else pulls in first
import sys, wafsamba, Configure, Logs
import Options, os, preproc
from samba_utils import *
from optparse import SUPPRESS_HELP
# this forces configure to be re-run if any of the configure
# sections of the build scripts change. We have to check
# for this in sys.argv as options have not yet been parsed when
# we need to set this. This is off by default until some issues
# are resolved related to WAFCACHE. It will need a lot of testing
# before it is enabled by default.
if '--enable-auto-reconfigure' in sys.argv:
Configure.autoconfig = True
def set_options(opt):
    """Register all command-line options for the build.

    Groups: library handling, install paths, developer/debug switches,
    cross-compilation settings, autoconf-compatibility stubs (ignored,
    kept so RPM spec files keep working) and 'waf dist' options.

    Fixes over the previous revision: misspelled local variable
    'builtin_defauilt' and help-text typos ("command separated",
    "cacheing").
    """
    opt.tool_options('compiler_cc')
    opt.tool_options('gnu_dirs')

    gr = opt.option_group('library handling options')
    gr.add_option('--bundled-libraries',
                  help=("comma separated list of bundled libraries. May include !LIBNAME to disable bundling a library. Can be 'NONE' or 'ALL' [auto]"),
                  action="store", dest='BUNDLED_LIBS', default='')
    gr.add_option('--private-libraries',
                  help=("comma separated list of normally public libraries to build instead as private libraries. May include !LIBNAME to disable making a library private. Can be 'NONE' or 'ALL' [auto]"),
                  action="store", dest='PRIVATE_LIBS', default='')
    # Options.options here is samba's option store (subscriptable);
    # these defaults are seeded by the enclosing project.
    extension_default = Options.options['PRIVATE_EXTENSION_DEFAULT']
    gr.add_option('--private-library-extension',
                  help=("name extension for private libraries [%s]" % extension_default),
                  action="store", dest='PRIVATE_EXTENSION', default=extension_default)
    extension_exception = Options.options['PRIVATE_EXTENSION_EXCEPTION']
    gr.add_option('--private-extension-exception',
                  help=("comma separated list of libraries to not apply extension to [%s]" % extension_exception),
                  action="store", dest='PRIVATE_EXTENSION_EXCEPTION', default=extension_exception)
    builtin_default = Options.options['BUILTIN_LIBRARIES_DEFAULT']
    gr.add_option('--builtin-libraries',
                  help=("comma separated list of libraries to build directly into binaries [%s]" % builtin_default),
                  action="store", dest='BUILTIN_LIBRARIES', default=builtin_default)
    gr.add_option('--minimum-library-version',
                  help=("list of minimum system library versions (LIBNAME1:version,LIBNAME2:version)"),
                  action="store", dest='MINIMUM_LIBRARY_VERSION', default='')
    gr.add_option('--disable-rpath',
                  help=("Disable use of rpath for build binaries"),
                  action="store_true", dest='disable_rpath_build', default=False)
    gr.add_option('--disable-rpath-install',
                  help=("Disable use of rpath for library path in installed files"),
                  action="store_true", dest='disable_rpath_install', default=False)
    gr.add_option('--disable-rpath-private-install',
                  help=("Disable use of rpath for private library path in installed files"),
                  action="store_true", dest='disable_rpath_private_install', default=False)
    gr.add_option('--nonshared-binary',
                  help=("Disable use of shared libs for the listed binaries"),
                  action="store", dest='NONSHARED_BINARIES', default='')
    gr.add_option('--disable-symbol-versions',
                  help=("Disable use of the --version-script linker option"),
                  action="store_true", dest='disable_symbol_versions', default=False)

    opt.add_option('--with-modulesdir',
                   help=("modules directory [PREFIX/modules]"),
                   action="store", dest='MODULESDIR', default='${PREFIX}/modules')
    opt.add_option('--with-privatelibdir',
                   help=("private library directory [PREFIX/lib/%s]" % Utils.g_module.APPNAME),
                   action="store", dest='PRIVATELIBDIR', default=None)
    # 'match' is a samba-specific extension to add_option used by the
    # configure answer-matching machinery.
    opt.add_option('--with-libiconv',
                   help='additional directory to search for libiconv',
                   action='store', dest='iconv_open', default='/usr/local',
                   match = ['Checking for library iconv', 'Checking for iconv_open', 'Checking for header iconv.h'])
    opt.add_option('--with-gettext',
                   help='additional directory to search for gettext',
                   action='store', dest='gettext_location', default='/usr/local',
                   match = ['Checking for library intl', 'Checking for header libintl.h'])
    opt.add_option('--without-gettext',
                   help=("Disable use of gettext"),
                   action="store_true", dest='disable_gettext', default=False)

    gr = opt.option_group('developer options')
    gr.add_option('-C',
                  help='enable configure caching',
                  action='store_true', dest='enable_configure_cache')
    gr.add_option('--enable-auto-reconfigure',
                  help='enable automatic reconfigure on build',
                  action='store_true', dest='enable_auto_reconfigure')
    gr.add_option('--enable-debug',
                  help=("Turn on debugging symbols"),
                  action="store_true", dest='debug', default=False)
    gr.add_option('--enable-developer',
                  help=("Turn on developer warnings and debugging"),
                  action="store_true", dest='developer', default=False)
    gr.add_option('--picky-developer',
                  help=("Treat all warnings as errors (enable -Werror)"),
                  action="store_true", dest='picky_developer', default=False)
    gr.add_option('--fatal-errors',
                  help=("Stop compilation on first error (enable -Wfatal-errors)"),
                  action="store_true", dest='fatal_errors', default=False)
    gr.add_option('--enable-gccdeps',
                  help=("Enable use of gcc -MD dependency module"),
                  action="store_true", dest='enable_gccdeps', default=True)
    gr.add_option('--timestamp-dependencies',
                  help=("use file timestamps instead of content for build dependencies (BROKEN)"),
                  action="store_true", dest='timestamp_dependencies', default=False)
    gr.add_option('--pedantic',
                  help=("Enable even more compiler warnings"),
                  action='store_true', dest='pedantic', default=False)
    gr.add_option('--git-local-changes',
                  help=("mark version with + if local git changes"),
                  action='store_true', dest='GIT_LOCAL_CHANGES', default=False)
    gr.add_option('--abi-check',
                  help=("Check ABI signatures for libraries"),
                  action='store_true', dest='ABI_CHECK', default=False)
    gr.add_option('--abi-check-disable',
                  help=("Disable ABI checking (used with --enable-developer)"),
                  action='store_true', dest='ABI_CHECK_DISABLE', default=False)
    gr.add_option('--abi-update',
                  help=("Update ABI signature files for libraries"),
                  action='store_true', dest='ABI_UPDATE', default=False)
    gr.add_option('--show-deps',
                  help=("Show dependency tree for the given target"),
                  dest='SHOWDEPS', default='')
    gr.add_option('--symbol-check',
                  help=("check symbols in object files against project rules"),
                  action='store_true', dest='SYMBOLCHECK', default=False)
    gr.add_option('--dup-symbol-check',
                  help=("check for duplicate symbols in object files and system libs (must be configured with --enable-developer)"),
                  action='store_true', dest='DUP_SYMBOLCHECK', default=False)
    gr.add_option('--why-needed',
                  help=("TARGET:DEPENDENCY check why TARGET needs DEPENDENCY"),
                  action='store', type='str', dest='WHYNEEDED', default=None)
    gr.add_option('--show-duplicates',
                  help=("Show objects which are included in multiple binaries or libraries"),
                  action='store_true', dest='SHOW_DUPLICATES', default=False)

    gr = opt.add_option_group('cross compilation options')
    gr.add_option('--cross-compile',
                  help=("configure for cross-compilation"),
                  action='store_true', dest='CROSS_COMPILE', default=False)
    gr.add_option('--cross-execute',
                  help=("command prefix to use for cross-execution in configure"),
                  action='store', dest='CROSS_EXECUTE', default='')
    gr.add_option('--cross-answers',
                  help=("answers to cross-compilation configuration (auto modified)"),
                  action='store', dest='CROSS_ANSWERS', default='')
    gr.add_option('--hostcc',
                  help=("set host compiler when cross compiling"),
                  action='store', dest='HOSTCC', default=False)

    # we use SUPPRESS_HELP for these, as they are ignored, and are there only
    # to allow existing RPM spec files to work
    opt.add_option('--build',
                   help=SUPPRESS_HELP,
                   action='store', dest='AUTOCONF_BUILD', default='')
    opt.add_option('--host',
                   help=SUPPRESS_HELP,
                   action='store', dest='AUTOCONF_HOST', default='')
    opt.add_option('--target',
                   help=SUPPRESS_HELP,
                   action='store', dest='AUTOCONF_TARGET', default='')
    opt.add_option('--program-prefix',
                   help=SUPPRESS_HELP,
                   action='store', dest='AUTOCONF_PROGRAM_PREFIX', default='')
    opt.add_option('--disable-dependency-tracking',
                   help=SUPPRESS_HELP,
                   action='store_true', dest='AUTOCONF_DISABLE_DEPENDENCY_TRACKING', default=False)
    opt.add_option('--disable-silent-rules',
                   help=SUPPRESS_HELP,
                   action='store_true', dest='AUTOCONF_DISABLE_SILENT_RULES', default=False)

    gr = opt.option_group('dist options')
    gr.add_option('--sign-release',
                  help='sign the release tarball created by waf dist',
                  action='store_true', dest='SIGN_RELEASE')
    gr.add_option('--tag',
                  help='tag release in git at the same time',
                  type='string', action='store', dest='TAG_RELEASE')
@wafsamba.runonce
def configure(conf):
    """Base configure stage shared by all samba-style waf projects.

    Loads the wafsamba tools, detects the compiler, validates basic C
    features (largefile, endianness, va_copy, ...), resolves install
    paths and rpath/version-script support, and records the options
    chosen at set_options() time into conf.env.

    The sequence of checks is order-dependent (later checks use headers
    and cflags registered by earlier ones); only a user-facing message
    typo was changed here ("support support").
    """
    conf.env.hlist = []
    conf.env.srcdir = conf.srcdir

    if Options.options.timestamp_dependencies:
        conf.ENABLE_TIMESTAMP_DEPENDENCIES()

    conf.SETUP_CONFIGURE_CACHE(Options.options.enable_configure_cache)

    # load our local waf extensions
    conf.check_tool('gnu_dirs')
    conf.check_tool('wafsamba')

    conf.CHECK_CC_ENV()

    conf.check_tool('compiler_cc')

    # we need git for 'waf dist'
    conf.find_program('git', var='GIT')

    # older gcc versions (< 4.4) does not work with gccdeps, so we have to see if the .d file is generated
    if Options.options.enable_gccdeps:
        from TaskGen import feature, after

        # test a compile with -MD and see whether a .d file appears
        @feature('testd')
        @after('apply_core')
        def check_d(self):
            tsk = self.compiled_tasks[0]
            tsk.outputs.append(tsk.outputs[0].change_ext('.d'))

        import Task
        cc = Task.TaskBase.classes['cc']
        oldmeth = cc.run
        # temporarily replace the cc run rule; restored in the finally
        cc.run = Task.compile_fun_noshell('cc', '${CC} ${CCFLAGS} ${CPPFLAGS} ${_CCINCFLAGS} ${_CCDEFFLAGS} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT[0].abspath(env)}')[0]
        try:
            try:
                conf.check(features='cc testd', fragment='int main() {return 0;}\n', ccflags=['-MD'], mandatory=True, msg='Check for -MD')
            except:
                # compiler does not support -MD; just skip gccdeps
                pass
            else:
                conf.check_tool('gccdeps', tooldir=conf.srcdir + "/buildtools/wafsamba")
        finally:
            cc.run = oldmeth

    # make the install paths available in environment
    conf.env.LIBDIR = Options.options.LIBDIR or '${PREFIX}/lib'
    conf.env.BINDIR = Options.options.BINDIR or '${PREFIX}/bin'
    conf.env.SBINDIR = Options.options.SBINDIR or '${PREFIX}/sbin'
    conf.env.MODULESDIR = Options.options.MODULESDIR
    conf.env.PRIVATELIBDIR = Options.options.PRIVATELIBDIR
    conf.env.BUNDLED_LIBS = Options.options.BUNDLED_LIBS.split(',')
    conf.env.PRIVATE_LIBS = Options.options.PRIVATE_LIBS.split(',')
    conf.env.BUILTIN_LIBRARIES = Options.options.BUILTIN_LIBRARIES.split(',')
    conf.env.NONSHARED_BINARIES = Options.options.NONSHARED_BINARIES.split(',')

    conf.env.PRIVATE_EXTENSION = Options.options.PRIVATE_EXTENSION
    conf.env.PRIVATE_EXTENSION_EXCEPTION = Options.options.PRIVATE_EXTENSION_EXCEPTION.split(',')

    conf.env.CROSS_COMPILE = Options.options.CROSS_COMPILE
    conf.env.CROSS_EXECUTE = Options.options.CROSS_EXECUTE
    conf.env.CROSS_ANSWERS = Options.options.CROSS_ANSWERS
    conf.env.HOSTCC = Options.options.HOSTCC

    conf.env.AUTOCONF_BUILD = Options.options.AUTOCONF_BUILD
    conf.env.AUTOCONF_HOST = Options.options.AUTOCONF_HOST
    conf.env.AUTOCONF_PROGRAM_PREFIX = Options.options.AUTOCONF_PROGRAM_PREFIX

    if (conf.env.AUTOCONF_HOST and
        conf.env.AUTOCONF_BUILD and
        conf.env.AUTOCONF_BUILD != conf.env.AUTOCONF_HOST):
        Logs.error('ERROR: Mismatch between --build and --host. Please use --cross-compile instead')
        sys.exit(1)
    if conf.env.AUTOCONF_PROGRAM_PREFIX:
        Logs.error('ERROR: --program-prefix not supported')
        sys.exit(1)

    # enable ABI checking for developers
    conf.env.ABI_CHECK = Options.options.ABI_CHECK or Options.options.developer
    if Options.options.ABI_CHECK_DISABLE:
        conf.env.ABI_CHECK = False
    try:
        conf.find_program('gdb', mandatory=True)
    except:
        # no gdb available: ABI signature extraction cannot run
        conf.env.ABI_CHECK = False

    conf.env.GIT_LOCAL_CHANGES = Options.options.GIT_LOCAL_CHANGES

    conf.CHECK_COMMAND(['uname', '-a'],
                       msg='Checking build system',
                       define='BUILD_SYSTEM',
                       on_target=False)
    conf.CHECK_UNAME()

    # see if we can compile and run a simple C program
    conf.CHECK_CODE('printf("hello world")',
                    define='HAVE_SIMPLE_C_PROG',
                    mandatory=True,
                    execute=True,
                    headers='stdio.h',
                    msg='Checking simple C program')

    # check for rpath
    if conf.CHECK_LIBRARY_SUPPORT(rpath=True):
        support_rpath = True
        conf.env.RPATH_ON_BUILD = not Options.options.disable_rpath_build
        conf.env.RPATH_ON_INSTALL = (conf.env.RPATH_ON_BUILD and
                                     not Options.options.disable_rpath_install)
        if not conf.env.PRIVATELIBDIR:
            conf.env.PRIVATELIBDIR = '%s/%s' % (conf.env.LIBDIR, Utils.g_module.APPNAME)
        conf.env.RPATH_ON_INSTALL_PRIVATE = (
            not Options.options.disable_rpath_private_install)
    else:
        support_rpath = False
        conf.env.RPATH_ON_INSTALL = False
        conf.env.RPATH_ON_BUILD = False
        conf.env.RPATH_ON_INSTALL_PRIVATE = False
        if not conf.env.PRIVATELIBDIR:
            # rpath is not possible so there is no sense in having a
            # private library directory by default.
            # the user can of course always override it.
            conf.env.PRIVATELIBDIR = conf.env.LIBDIR

    if (not Options.options.disable_symbol_versions and
        conf.CHECK_LIBRARY_SUPPORT(rpath=support_rpath,
                                   version_script=True,
                                   msg='-Wl,--version-script support')):
        conf.env.HAVE_LD_VERSION_SCRIPT = True
    else:
        conf.env.HAVE_LD_VERSION_SCRIPT = False

    if sys.platform.startswith('aix'):
        conf.DEFINE('_ALL_SOURCE', 1, add_to_cflags=True)
        # Might not be needed if ALL_SOURCE is defined
        # conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True)

    # we should use the PIC options in waf instead
    # Some compilo didn't support -fPIC but just print a warning
    if conf.env['COMPILER_CC'] == "suncc":
        conf.ADD_CFLAGS('-KPIC', testflags=True)
        # we really want define here as we need to have this
        # define even during the tests otherwise detection of
        # boolean is broken
        conf.DEFINE('_STDC_C99', 1, add_to_cflags=True)
        conf.DEFINE('_XPG6', 1, add_to_cflags=True)
    else:
        conf.ADD_CFLAGS('-fPIC', testflags=True)

    # On Solaris 8 with suncc (at least) the flags for the linker to define the name of the
    # library are not always working (if the command line is very very long and with a lot
    # files)
    if conf.env['COMPILER_CC'] == "suncc":
        save = conf.env['SONAME_ST']
        conf.env['SONAME_ST'] = '-Wl,-h,%s'
        if not conf.CHECK_SHLIB_INTRASINC_NAME_FLAGS("Checking if flags %s are ok" % conf.env['SONAME_ST']):
            conf.env['SONAME_ST'] = save

    conf.CHECK_INLINE()

    # check for pkgconfig
    conf.check_cfg(atleast_pkgconfig_version='0.0.0')

    conf.DEFINE('_GNU_SOURCE', 1, add_to_cflags=True)
    conf.DEFINE('_XOPEN_SOURCE_EXTENDED', 1, add_to_cflags=True)

    # get the base headers we'll use for the rest of the tests
    conf.CHECK_HEADERS('stdio.h sys/types.h sys/stat.h stdlib.h stddef.h memory.h string.h',
                       add_headers=True)
    conf.CHECK_HEADERS('strings.h inttypes.h stdint.h unistd.h minix/config.h', add_headers=True)
    conf.CHECK_HEADERS('ctype.h', add_headers=True)

    if sys.platform != 'darwin':
        conf.CHECK_HEADERS('standards.h', add_headers=True)

    conf.CHECK_HEADERS('stdbool.h stdint.h stdarg.h vararg.h', add_headers=True)
    conf.CHECK_HEADERS('limits.h assert.h')

    # see if we need special largefile flags
    if not conf.CHECK_LARGEFILE():
        raise Utils.WafError('Samba requires large file support, but not available on this platform: sizeof(off_t) < 8')

    if 'HAVE_STDDEF_H' in conf.env and 'HAVE_STDLIB_H' in conf.env:
        conf.DEFINE('STDC_HEADERS', 1)

    conf.CHECK_HEADERS('sys/time.h time.h', together=True)

    if 'HAVE_SYS_TIME_H' in conf.env and 'HAVE_TIME_H' in conf.env:
        conf.DEFINE('TIME_WITH_SYS_TIME', 1)

    # cope with different extensions for libraries
    (root, ext) = os.path.splitext(conf.env.shlib_PATTERN)
    if ext[0] == '.':
        conf.define('SHLIBEXT', ext[1:], quote=True)
    else:
        conf.define('SHLIBEXT', "so", quote=True)

    conf.CHECK_CODE('long one = 1; return ((char *)(&one))[0]',
                    execute=True,
                    define='WORDS_BIGENDIAN')

    # check if signal() takes a void function
    if conf.CHECK_CODE('return *(signal (0, 0)) (0) == 1',
                       define='RETSIGTYPE_INT',
                       execute=False,
                       headers='signal.h',
                       msg='Checking if signal handlers return int'):
        conf.DEFINE('RETSIGTYPE', 'int')
    else:
        conf.DEFINE('RETSIGTYPE', 'void')

    conf.CHECK_VARIABLE('__FUNCTION__', define='HAVE_FUNCTION_MACRO')

    conf.CHECK_CODE('va_list ap1,ap2; va_copy(ap1,ap2)',
                    define="HAVE_VA_COPY",
                    msg="Checking for va_copy")

    conf.CHECK_CODE('''
                    #define eprintf(...) fprintf(stderr, __VA_ARGS__)
                    eprintf("bla", "bar")
                    ''', define='HAVE__VA_ARGS__MACRO')

    conf.SAMBA_BUILD_ENV()
def build(bld):
    """Base build rule: sanity-check the source location, then set up
    samba's build groups and project-rule checks."""
    # give a more useful message if the source directory has moved
    top = bld.srcnode.abspath()
    rel = os_path_relpath(bld.curdir, top)
    if '../' in rel:
        Logs.error('bld.curdir %s is not a child of %s' % (bld.curdir, top))
        raise Utils.WafError('''The top source directory has moved. Please run distclean and reconfigure''')

    bld.CHECK_MAKEFLAGS()
    bld.SETUP_BUILD_GROUPS()
    bld.ENFORCE_GROUP_ORDERING()
    bld.CHECK_PROJECT_RULES()
ntdb-1.0/check.c 0000664 0000000 0000000 00000047253 12241515307 0013535 0 ustar 00root root 0000000 0000000 /*
Trivial Database 2: free list/block handling
Copyright (C) Rusty Russell 2010
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include
#include
/* We keep an ordered array of offsets. */
/* Grow the ordered offset array by one slot and append OFF.
 * Returns false on allocation failure (the old array stays valid). */
static bool append(struct ntdb_context *ntdb,
		   ntdb_off_t **arr, size_t *num, ntdb_off_t off)
{
	ntdb_off_t *grown;
	size_t bytes = (*num + 1) * sizeof(ntdb_off_t);

	if (*num == 0)
		grown = ntdb->alloc_fn(ntdb, bytes, ntdb->alloc_data);
	else
		grown = ntdb->expand_fn(*arr, bytes, ntdb->alloc_data);

	if (grown == NULL)
		return false;

	grown[*num] = off;
	(*num)++;
	*arr = grown;
	return true;
}
/*
 * Validate the file header: magic food string, hash sanity value,
 * feature masks, recovery-area offset and the capability chain.
 *
 * On success *recovery holds the recovery offset (0 if none),
 * *features the offered feature mask, and *num_capabilities is bumped
 * once per capability record seen.
 */
static enum NTDB_ERROR check_header(struct ntdb_context *ntdb,
				    ntdb_off_t *recovery,
				    uint64_t *features,
				    size_t *num_capabilities)
{
	uint64_t hash_test;
	struct ntdb_header hdr;
	enum NTDB_ERROR ecode;
	ntdb_off_t off, next;

	ecode = ntdb_read_convert(ntdb, 0, &hdr, sizeof(hdr));
	if (ecode != NTDB_SUCCESS) {
		return ecode;
	}
	/* magic food should not be converted, so convert back. */
	ntdb_convert(ntdb, hdr.magic_food, sizeof(hdr.magic_food));

	/* Stored hash_test must equal our hash of NTDB_HASH_MAGIC:
	 * proves the file was written with the same hash function. */
	hash_test = NTDB_HASH_MAGIC;
	hash_test = ntdb_hash(ntdb, &hash_test, sizeof(hash_test));
	if (hdr.hash_test != hash_test) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "check: hash test %llu should be %llu",
				   (long long)hdr.hash_test,
				   (long long)hash_test);
	}

	if (strcmp(hdr.magic_food, NTDB_MAGIC_FOOD) != 0) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "check: bad magic '%.*s'",
				   (unsigned)sizeof(hdr.magic_food),
				   hdr.magic_food);
	}

	/* Features which are used must be a subset of features offered. */
	if (hdr.features_used & ~hdr.features_offered) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "check: features used (0x%llx) which"
				   " are not offered (0x%llx)",
				   (long long)hdr.features_used,
				   (long long)hdr.features_offered);
	}

	*features = hdr.features_offered;
	*recovery = hdr.recovery;
	if (*recovery) {
		/* Recovery area must lie past the header, inside the map. */
		if (*recovery < sizeof(hdr)
		    || *recovery > ntdb->file->map_size) {
			return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
					   "ntdb_check:"
					   " invalid recovery offset %zu",
					   (size_t)*recovery);
		}
	}

	/* Walk the singly-linked capability chain. */
	for (off = hdr.capabilities; off && ecode == NTDB_SUCCESS; off = next) {
		const struct ntdb_capability *cap;
		enum NTDB_ERROR e;

		cap = ntdb_access_read(ntdb, off, sizeof(*cap), true);
		if (NTDB_PTR_IS_ERR(cap)) {
			return NTDB_PTR_ERR(cap);
		}

		/* All capabilities are unknown. */
		e = unknown_capability(ntdb, "ntdb_check", cap->type);
		next = cap->next;
		ntdb_access_release(ntdb, cap);
		if (e)
			return e;
		(*num_capabilities)++;
	}

	/* Don't check reserved: they *can* be used later. */
	return NTDB_SUCCESS;
}
/* Three-way comparator for asearch() over 64-bit offsets.
 * Compares rather than subtracts: a 64-bit difference can overflow int. */
static int off_cmp(const ntdb_off_t *a, const ntdb_off_t *b)
{
	if (*a > *b)
		return 1;
	if (*a < *b)
		return -1;
	return 0;
}
/*
 * Validate one hash-table entry (offset with stolen hash bits).
 *
 * Confirms the offset appears in the sorted 'used' array gathered by
 * the linear scan, re-hashes the key to verify bucket placement and
 * the stolen upper hash bits, and runs the caller's per-record check.
 * Matched entries are flagged (low bit flip) so duplicates are caught,
 * and *num_found is incremented.
 */
static enum NTDB_ERROR check_entry(struct ntdb_context *ntdb,
				   ntdb_off_t off_and_hash,
				   ntdb_len_t bucket,
				   ntdb_off_t used[],
				   size_t num_used,
				   size_t *num_found,
				   enum NTDB_ERROR (*check)(NTDB_DATA,
							    NTDB_DATA,
							    void *),
				   void *data)
{
	enum NTDB_ERROR ecode;
	const struct ntdb_used_record *r;
	const unsigned char *kptr;
	ntdb_len_t klen, dlen;
	uint32_t hash;
	ntdb_off_t off = off_and_hash & NTDB_OFF_MASK;
	ntdb_off_t *p;

	/* Empty bucket is fine. */
	if (!off_and_hash) {
		return NTDB_SUCCESS;
	}

	/* This can't point to a chain, we handled those at toplevel. */
	if (off_and_hash & (1ULL << NTDB_OFF_CHAIN_BIT)) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "ntdb_check: Invalid chain bit in offset "
				   " %llu", (long long)off_and_hash);
	}

	/* Offsets from the linear scan are sorted; binary-search it. */
	p = asearch(&off, used, num_used, off_cmp);
	if (!p) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "ntdb_check: Invalid offset"
				   " %llu in hash", (long long)off);
	}
	/* Mark it invalid. */
	*p ^= 1;
	(*num_found)++;

	/* Read the header first for the lengths, then the key+data. */
	r = ntdb_access_read(ntdb, off, sizeof(*r), true);
	if (NTDB_PTR_IS_ERR(r)) {
		return NTDB_PTR_ERR(r);
	}
	klen = rec_key_length(r);
	dlen = rec_data_length(r);
	ntdb_access_release(ntdb, r);

	kptr = ntdb_access_read(ntdb, off + sizeof(*r), klen + dlen, false);
	if (NTDB_PTR_IS_ERR(kptr)) {
		return NTDB_PTR_ERR(kptr);
	}

	hash = ntdb_hash(ntdb, kptr, klen);

	/* Are we in the right chain? */
	if (bits_from(hash, 0, ntdb->hash_bits) != bucket) {
		ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
				    NTDB_LOG_ERROR,
				    "ntdb_check: Bad bucket %u vs %llu",
				    bits_from(hash, 0, ntdb->hash_bits),
				    (long long)bucket);
	/* Next 8 bits should be the same as top bits of bucket. */
	} else if (bits_from(hash, ntdb->hash_bits, NTDB_OFF_UPPER_STEAL)
		   != bits_from(off_and_hash, 64-NTDB_OFF_UPPER_STEAL,
				NTDB_OFF_UPPER_STEAL)) {
		ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
				    NTDB_LOG_ERROR,
				    "ntdb_check: Bad hash bits %llu vs %llu",
				    (long long)off_and_hash,
				    (long long)hash);
	} else if (check) {
		/* User-supplied per-record callback. */
		NTDB_DATA k, d;

		k = ntdb_mkdata(kptr, klen);
		d = ntdb_mkdata(kptr + klen, dlen);
		ecode = check(k, d, data);
	} else {
		ecode = NTDB_SUCCESS;
	}
	ntdb_access_release(ntdb, kptr);

	return ecode;
}
/*
 * Validate an overflow chain record (NTDB_CHAIN_MAGIC) hanging off a
 * hash bucket, then check every entry it contains via check_entry().
 * The chain record itself counts as a used record, so *num_found is
 * incremented for it as well.
 */
static enum NTDB_ERROR check_hash_chain(struct ntdb_context *ntdb,
					ntdb_off_t off,
					ntdb_len_t bucket,
					ntdb_off_t used[],
					size_t num_used,
					size_t *num_found,
					enum NTDB_ERROR (*check)(NTDB_DATA,
								 NTDB_DATA,
								 void *),
					void *data)
{
	struct ntdb_used_record rec;
	enum NTDB_ERROR ecode;
	const ntdb_off_t *entries;
	ntdb_len_t i, num;

	/* This is a used entry. */
	(*num_found)++;

	ecode = ntdb_read_convert(ntdb, off, &rec, sizeof(rec));
	if (ecode != NTDB_SUCCESS) {
		return ecode;
	}

	if (rec_magic(&rec) != NTDB_CHAIN_MAGIC) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "ntdb_check: Bad hash chain magic %llu",
				   (long long)rec_magic(&rec));
	}

	/* Chain payload is a plain array of ntdb_off_t; no key. */
	if (rec_data_length(&rec) % sizeof(ntdb_off_t)) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "ntdb_check: Bad hash chain data length %llu",
				   (long long)rec_data_length(&rec));
	}

	if (rec_key_length(&rec) != 0) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "ntdb_check: Bad hash chain key length %llu",
				   (long long)rec_key_length(&rec));
	}

	off += sizeof(rec);
	num = rec_data_length(&rec) / sizeof(ntdb_off_t);
	entries = ntdb_access_read(ntdb, off, rec_data_length(&rec), true);
	if (NTDB_PTR_IS_ERR(entries)) {
		return NTDB_PTR_ERR(entries);
	}

	/* Check each non-deleted entry in chain. */
	for (i = 0; i < num; i++) {
		ecode = check_entry(ntdb, entries[i], bucket,
				    used, num_used, num_found, check, data);
		if (ecode) {
			break;
		}
	}

	ntdb_access_release(ntdb, entries);
	return ecode;
}
/*
 * Validate the top-level hash table and everything reachable from it.
 *
 * 'used'/'num_used' is the sorted list of used-record offsets from the
 * linear scan; 'num_other_used' counts records that legitimately never
 * appear in the hash (free tables and capabilities).  After the walk,
 * every used record must have been accounted for exactly once.
 */
static enum NTDB_ERROR check_hash(struct ntdb_context *ntdb,
				  ntdb_off_t used[],
				  size_t num_used,
				  size_t num_other_used,
				  enum NTDB_ERROR (*check)(NTDB_DATA,
							   NTDB_DATA,
							   void *),
				  void *data)
{
	enum NTDB_ERROR ecode;
	struct ntdb_used_record rec;
	const ntdb_off_t *entries;
	ntdb_len_t i;
	/* Free tables and capabilities also show up as used, as do we. */
	size_t num_found = num_other_used + 1;

	ecode = ntdb_read_convert(ntdb, NTDB_HASH_OFFSET, &rec, sizeof(rec));
	if (ecode != NTDB_SUCCESS) {
		return ecode;
	}

	if (rec_magic(&rec) != NTDB_HTABLE_MAGIC) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "ntdb_check: Bad hash table magic %llu",
				   (long long)rec_magic(&rec));
	}

	/* Table must hold exactly 2^hash_bits offsets, no key. */
	if (rec_data_length(&rec) != (sizeof(ntdb_off_t) << ntdb->hash_bits)) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "ntdb_check: Bad hash table data length %llu",
				   (long long)rec_data_length(&rec));
	}

	if (rec_key_length(&rec) != 0) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "ntdb_check: Bad hash table key length %llu",
				   (long long)rec_key_length(&rec));
	}

	entries = ntdb_access_read(ntdb, NTDB_HASH_OFFSET + sizeof(rec),
				   rec_data_length(&rec), true);
	if (NTDB_PTR_IS_ERR(entries)) {
		return NTDB_PTR_ERR(entries);
	}

	/* NOTE(review): 1 << hash_bits as int assumes hash_bits < 31;
	 * presumably guaranteed elsewhere — confirm. */
	for (i = 0; i < (1 << ntdb->hash_bits); i++) {
		ntdb_off_t off = entries[i] & NTDB_OFF_MASK;
		if (entries[i] & (1ULL << NTDB_OFF_CHAIN_BIT)) {
			ecode = check_hash_chain(ntdb, off, i,
						 used, num_used, &num_found,
						 check, data);
		} else {
			ecode = check_entry(ntdb, entries[i], i,
					    used, num_used, &num_found,
					    check, data);
		}
		if (ecode) {
			break;
		}
	}
	ntdb_access_release(ntdb, entries);

	if (ecode == NTDB_SUCCESS && num_found != num_used) {
		ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				    "ntdb_check: Not all entries are in hash");
	}
	return ecode;
}
/*
 * Validate a single free-list record: magic, owning free table, bounds,
 * bucket placement and the doubly-linked-list back pointer.
 *
 * Fix: the "bad prev" diagnostic previously printed frec_len(frec)
 * instead of the back pointer actually being compared, frec_prev(frec).
 */
static enum NTDB_ERROR check_free(struct ntdb_context *ntdb,
				  ntdb_off_t off,
				  const struct ntdb_free_record *frec,
				  ntdb_off_t prev, unsigned int ftable,
				  unsigned int bucket)
{
	enum NTDB_ERROR ecode;

	if (frec_magic(frec) != NTDB_FREE_MAGIC) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "ntdb_check: offset %llu bad magic 0x%llx",
				   (long long)off,
				   (long long)frec->magic_and_prev);
	}

	if (frec_ftable(frec) != ftable) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "ntdb_check: offset %llu bad freetable %u",
				   (long long)off, frec_ftable(frec));
	}

	/* Whole free record (header + free length) must lie in the file. */
	ecode = ntdb_oob(ntdb, off,
			 frec_len(frec) + sizeof(struct ntdb_used_record),
			 false);
	if (ecode != NTDB_SUCCESS) {
		return ecode;
	}

	if (size_to_bucket(frec_len(frec)) != bucket) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "ntdb_check: offset %llu in wrong bucket"
				   " (%u vs %u)",
				   (long long)off,
				   bucket, size_to_bucket(frec_len(frec)));
	}

	/* prev == 0 means "first in list, verified separately later". */
	if (prev && prev != frec_prev(frec)) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "ntdb_check: offset %llu bad prev"
				   " (%llu vs %llu)",
				   (long long)off,
				   (long long)prev,
				   (long long)frec_prev(frec));
	}
	return NTDB_SUCCESS;
}
/*
 * Validate one free table: its header, then every bucket's linked list.
 *
 * Each free record found must appear in the sorted 'fr' array from the
 * linear scan; matched entries are flagged (low bit flip) and counted
 * in *num_found.  The first record's back pointer is re-checked at the
 * end, once 'prev' holds the last record in the list.
 */
static enum NTDB_ERROR check_free_table(struct ntdb_context *ntdb,
					ntdb_off_t ftable_off,
					unsigned ftable_num,
					ntdb_off_t fr[],
					size_t num_free,
					size_t *num_found)
{
	struct ntdb_freetable ft;
	ntdb_off_t h;
	unsigned int i;
	enum NTDB_ERROR ecode;

	ecode = ntdb_read_convert(ntdb, ftable_off, &ft, sizeof(ft));
	if (ecode != NTDB_SUCCESS) {
		return ecode;
	}

	if (rec_magic(&ft.hdr) != NTDB_FTABLE_MAGIC
	    || rec_key_length(&ft.hdr) != 0
	    || rec_data_length(&ft.hdr) != sizeof(ft) - sizeof(ft.hdr)) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "ntdb_check: Invalid header on free table");
	}

	for (i = 0; i < NTDB_FREE_BUCKETS; i++) {
		ntdb_off_t off, prev = 0, *p, first = 0;
		struct ntdb_free_record f;

		h = bucket_off(ftable_off, i);
		for (off = ntdb_read_off(ntdb, h); off; off = f.next) {
			if (NTDB_OFF_IS_ERR(off)) {
				return NTDB_OFF_TO_ERR(off);
			}
			/* Head offset carries flag bits; strip for first. */
			if (!first) {
				off &= NTDB_OFF_MASK;
				first = off;
			}
			ecode = ntdb_read_convert(ntdb, off, &f, sizeof(f));
			if (ecode != NTDB_SUCCESS) {
				return ecode;
			}

			ecode = check_free(ntdb, off, &f, prev, ftable_num, i);
			if (ecode != NTDB_SUCCESS) {
				return ecode;
			}

			/* FIXME: Check hash bits */
			p = asearch(&off, fr, num_free, off_cmp);
			if (!p) {
				return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
						   NTDB_LOG_ERROR,
						   "ntdb_check: Invalid offset"
						   " %llu in free table",
						   (long long)off);
			}
			/* Mark it invalid. */
			*p ^= 1;
			(*num_found)++;
			prev = off;
		}

		if (first) {
			/* Now we can check first back pointer. */
			ecode = ntdb_read_convert(ntdb, first, &f, sizeof(f));
			if (ecode != NTDB_SUCCESS) {
				return ecode;
			}
			ecode = check_free(ntdb, first, &f, prev, ftable_num, i);
			if (ecode != NTDB_SUCCESS) {
				return ecode;
			}
		}
	}
	return NTDB_SUCCESS;
}
/* Slow, but should be very rare.
 *
 * Measure the run of dead bytes (zeroes from ftruncate, or 0x43 fill)
 * starting at OFF; stops at the first live byte or end of map.
 * Returns the length, or NTDB_ERR_TO_OFF() on read failure.
 *
 * Fix: the read previously used the fixed offset 'off' instead of
 * 'off + len', so the loop examined the same byte on every iteration
 * and could never terminate at the true end of the dead region.
 */
ntdb_off_t dead_space(struct ntdb_context *ntdb, ntdb_off_t off)
{
	size_t len;
	enum NTDB_ERROR ecode;

	for (len = 0; off + len < ntdb->file->map_size; len++) {
		char c;
		ecode = ntdb->io->tread(ntdb, off + len, &c, 1);
		if (ecode != NTDB_SUCCESS) {
			return NTDB_ERR_TO_OFF(ecode);
		}
		if (c != 0 && c != 0x43)
			break;
	}
	return len;
}
/*
 * Linear scan of the whole file after the header.
 *
 * Classifies every record by magic: dead space / invalid-recovery fill,
 * the recovery area, free records (appended to *fr) and used records
 * (appended to *used).  Both arrays come back sorted by construction
 * since the scan proceeds in file order.  Fails on any unknown magic,
 * overlength record, or a missing recovery area when the header
 * promised one.
 */
static enum NTDB_ERROR check_linear(struct ntdb_context *ntdb,
				    ntdb_off_t **used, size_t *num_used,
				    ntdb_off_t **fr, size_t *num_free,
				    uint64_t features, ntdb_off_t recovery)
{
	ntdb_off_t off;
	ntdb_len_t len;
	enum NTDB_ERROR ecode;
	bool found_recovery = false;

	for (off = sizeof(struct ntdb_header);
	     off < ntdb->file->map_size;
	     off += len) {
		union {
			struct ntdb_used_record u;
			struct ntdb_free_record f;
			struct ntdb_recovery_record r;
		} rec;
		/* r is larger: only get that if we need to. */
		ecode = ntdb_read_convert(ntdb, off, &rec, sizeof(rec.f));
		if (ecode != NTDB_SUCCESS) {
			return ecode;
		}

		/* If we crash after ftruncate, we can get zeroes or fill. */
		if (rec.r.magic == NTDB_RECOVERY_INVALID_MAGIC
		    || rec.r.magic ==  0x4343434343434343ULL) {
			ecode = ntdb_read_convert(ntdb, off, &rec, sizeof(rec.r));
			if (ecode != NTDB_SUCCESS) {
				return ecode;
			}
			if (recovery == off) {
				found_recovery = true;
				len = sizeof(rec.r) + rec.r.max_len;
			} else {
				len = dead_space(ntdb, off);
				if (NTDB_OFF_IS_ERR(len)) {
					return NTDB_OFF_TO_ERR(len);
				}
				if (len < sizeof(rec.r)) {
					return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
							   NTDB_LOG_ERROR,
							   "ntdb_check: invalid"
							   " dead space at %zu",
							   (size_t)off);
				}

				/* Warning only: dead space is legal. */
				ntdb_logerr(ntdb, NTDB_SUCCESS, NTDB_LOG_WARNING,
					    "Dead space at %zu-%zu (of %zu)",
					    (size_t)off, (size_t)(off + len),
					    (size_t)ntdb->file->map_size);
			}
		} else if (rec.r.magic == NTDB_RECOVERY_MAGIC) {
			/* A valid recovery area: must be where the header said. */
			ecode = ntdb_read_convert(ntdb, off, &rec, sizeof(rec.r));
			if (ecode != NTDB_SUCCESS) {
				return ecode;
			}
			if (recovery != off) {
				return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
						   NTDB_LOG_ERROR,
						   "ntdb_check: unexpected"
						   " recovery record at offset"
						   " %zu",
						   (size_t)off);
			}
			if (rec.r.len > rec.r.max_len) {
				return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
						   NTDB_LOG_ERROR,
						   "ntdb_check: invalid recovery"
						   " length %zu",
						   (size_t)rec.r.len);
			}
			if (rec.r.eof > ntdb->file->map_size) {
				return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
						   NTDB_LOG_ERROR,
						   "ntdb_check: invalid old EOF"
						   " %zu", (size_t)rec.r.eof);
			}
			found_recovery = true;
			len = sizeof(rec.r) + rec.r.max_len;
		} else if (frec_magic(&rec.f) == NTDB_FREE_MAGIC) {
			len = sizeof(rec.u) + frec_len(&rec.f);
			if (off + len > ntdb->file->map_size) {
				return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
						   NTDB_LOG_ERROR,
						   "ntdb_check: free overlength"
						   " %llu at offset %llu",
						   (long long)len,
						   (long long)off);
			}
			/* This record should be in free lists. */
			if (frec_ftable(&rec.f) != NTDB_FTABLE_NONE
			    && !append(ntdb, fr, num_free, off)) {
				return ntdb_logerr(ntdb, NTDB_ERR_OOM,
						   NTDB_LOG_ERROR,
						   "ntdb_check: tracking %zu'th"
						   " free record.", *num_free);
			}
		} else if (rec_magic(&rec.u) == NTDB_USED_MAGIC
			   || rec_magic(&rec.u) == NTDB_CHAIN_MAGIC
			   || rec_magic(&rec.u) == NTDB_HTABLE_MAGIC
			   || rec_magic(&rec.u) == NTDB_FTABLE_MAGIC
			   || rec_magic(&rec.u) == NTDB_CAP_MAGIC) {
			uint64_t klen, dlen, extra;

			/* This record is used! */
			if (!append(ntdb, used, num_used, off)) {
				return ntdb_logerr(ntdb, NTDB_ERR_OOM,
						   NTDB_LOG_ERROR,
						   "ntdb_check: tracking %zu'th"
						   " used record.", *num_used);
			}

			klen = rec_key_length(&rec.u);
			dlen = rec_data_length(&rec.u);
			extra = rec_extra_padding(&rec.u);

			len = sizeof(rec.u) + klen + dlen + extra;
			if (off + len > ntdb->file->map_size) {
				return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
						   NTDB_LOG_ERROR,
						   "ntdb_check: used overlength"
						   " %llu at offset %llu",
						   (long long)len,
						   (long long)off);
			}

			if (len < sizeof(rec.f)) {
				return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
						   NTDB_LOG_ERROR,
						   "ntdb_check: too short record"
						   " %llu at %llu",
						   (long long)len,
						   (long long)off);
			}

			/* Check that records have correct 0 at end (but may
			 * not in future). */
			if (extra && !features
			    && rec_magic(&rec.u) != NTDB_CAP_MAGIC) {
				const char *p;
				char c;

				p = ntdb_access_read(ntdb, off + sizeof(rec.u)
						     + klen + dlen, 1, false);
				if (NTDB_PTR_IS_ERR(p))
					return NTDB_PTR_ERR(p);
				c = *p;
				ntdb_access_release(ntdb, p);

				if (c != '\0') {
					return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
							   NTDB_LOG_ERROR,
							   "ntdb_check:"
							   " non-zero extra"
							   " at %llu",
							   (long long)off);
				}
			}
		} else {
			return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
					   NTDB_LOG_ERROR,
					   "ntdb_check: Bad magic 0x%llx"
					   " at offset %zu",
					   (long long)rec_magic(&rec.u),
					   (size_t)off);
		}
	}

	/* We must have found recovery area if there was one. */
	if (recovery != 0 && !found_recovery) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "ntdb_check: expected a recovery area at %zu",
				   (size_t)recovery);
	}

	return NTDB_SUCCESS;
}
/*
 * Public entry point: full consistency check of an open ntdb.
 *
 * Takes the allrecord and expand locks (read-only), verifies the
 * header, linear-scans all records, then cross-checks the free tables
 * and hash table against the scan.  'check', if non-NULL, is invoked
 * once per key/data pair.  Returns NTDB_SUCCESS or the first error
 * found; locks and scratch arrays are released on all paths.
 */
_PUBLIC_ enum NTDB_ERROR ntdb_check_(struct ntdb_context *ntdb,
		  enum NTDB_ERROR (*check)(NTDB_DATA, NTDB_DATA, void *),
		  void *data)
{
	ntdb_off_t *fr = NULL, *used = NULL;
	ntdb_off_t ft = 0, recovery = 0;
	/* num_found here counts free records matched by check_free_table. */
	size_t num_free = 0, num_used = 0, num_found = 0, num_ftables = 0,
		num_capabilities = 0;
	uint64_t features = 0;
	enum NTDB_ERROR ecode;

	if (ntdb->flags & NTDB_CANT_CHECK) {
		return ntdb_logerr(ntdb, NTDB_SUCCESS, NTDB_LOG_WARNING,
				   "ntdb_check: database has unknown capability,"
				   " cannot check.");
	}

	ecode = ntdb_allrecord_lock(ntdb, F_RDLCK, NTDB_LOCK_WAIT, false);
	if (ecode != NTDB_SUCCESS) {
		return ecode;
	}

	ecode = ntdb_lock_expand(ntdb, F_RDLCK);
	if (ecode != NTDB_SUCCESS) {
		ntdb_allrecord_unlock(ntdb, F_RDLCK);
		return ecode;
	}

	ecode = check_header(ntdb, &recovery, &features, &num_capabilities);
	if (ecode != NTDB_SUCCESS)
		goto out;

	/* First we do a linear scan, checking all records. */
	ecode = check_linear(ntdb, &used, &num_used, &fr, &num_free, features,
			     recovery);
	if (ecode != NTDB_SUCCESS)
		goto out;

	for (ft = first_ftable(ntdb); ft; ft = next_ftable(ntdb, ft)) {
		if (NTDB_OFF_IS_ERR(ft)) {
			ecode = NTDB_OFF_TO_ERR(ft);
			goto out;
		}
		ecode = check_free_table(ntdb, ft, num_ftables, fr, num_free,
					 &num_found);
		if (ecode != NTDB_SUCCESS)
			goto out;
		num_ftables++;
	}

	/* FIXME: Check key uniqueness? */
	ecode = check_hash(ntdb, used, num_used, num_ftables + num_capabilities,
			   check, data);
	if (ecode != NTDB_SUCCESS)
		goto out;

	/* Every free record from the scan must be reachable from a table. */
	if (num_found != num_free) {
		ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				    "ntdb_check: Not all entries are in"
				    " free table");
	}

out:
	ntdb_allrecord_unlock(ntdb, F_RDLCK);
	ntdb_unlock_expand(ntdb, F_RDLCK);
	ntdb->free_fn(fr, ntdb->alloc_data);
	ntdb->free_fn(used, ntdb->alloc_data);
	return ecode;
}
ntdb-1.0/configure 0000775 0000000 0000000 00000000650 12241515307 0014211 0 ustar 00root root 0000000 0000000 #!/bin/sh
# Locate the waf build tool relative to this script and run its
# configure stage, forwarding all command-line arguments.
PREVPATH=$(dirname "$0")

# Prefer an in-tree Samba buildtools checkout two levels up; fall
# back to a local buildtools directory.  Paths are quoted so the
# script works when the checkout path contains spaces.
if [ -f "$PREVPATH/../../buildtools/bin/waf" ]; then
	WAF=../../buildtools/bin/waf
elif [ -f "$PREVPATH/buildtools/bin/waf" ]; then
	WAF=./buildtools/bin/waf
else
	echo "replace: Unable to find waf"
	exit 1
fi

# using JOBS=1 gives maximum compatibility with
# systems like AIX which have broken threading in python
JOBS=1
export JOBS

cd . || exit 1
"$WAF" configure "$@" || exit 1
cd "$PREVPATH"
ntdb-1.0/doc/ 0000775 0000000 0000000 00000000000 12241515307 0013046 5 ustar 00root root 0000000 0000000 ntdb-1.0/doc/TDB_porting.txt 0000664 0000000 0000000 00000027601 12241515307 0015770 0 ustar 00root root 0000000 0000000 Interface differences between TDB and NTDB.
- ntdb shares 'struct TDB_DATA' with tdb, but TDB defines the TDB_DATA
typedef, whereas ntdb defines NTDB_DATA (ie. both are compatible).
If you include both ntdb.h and tdb.h, #include tdb.h first,
otherwise you'll get a compile error when tdb.h re-defined struct
TDB_DATA.
Example:
#include <tdb.h>
#include <ntdb.h>
- ntdb functions return NTDB_SUCCESS (ie 0) on success, and a negative
error on failure, whereas tdb functions returned 0 on success, and
-1 on failure. tdb then used tdb_error() to determine the error;
this API is nasty if we ever want to support threads, so is not supported.
Example:
#include <tdb.h>
#include <ntdb.h>
void tdb_example(struct tdb_context *tdb, TDB_DATA key, TDB_DATA d)
{
if (tdb_store(tdb, key, d) == -1) {
printf("store failed: %s\n", tdb_errorstr(tdb));
}
}
void ntdb_example(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA d)
{
enum NTDB_ERROR e;
e = ntdb_store(ntdb, key, d);
if (e) {
printf("store failed: %s\n", ntdb_errorstr(e));
}
}
- ntdb's ntdb_fetch() returns an error, tdb's returned the data directly
(or tdb_null, and you were supposed to check tdb_error() to find out why).
Example:
#include
#include
void tdb_example(struct tdb_context *tdb, TDB_DATA key)
{
TDB_DATA data;
data = tdb_fetch(tdb, key);
if (!data.dptr) {
printf("fetch failed: %s\n", tdb_errorstr(tdb));
}
}
void ntdb_example(struct ntdb_context *ntdb, NTDB_DATA key)
{
NTDB_DATA data;
enum NTDB_ERROR e;
e = ntdb_fetch(ntdb, key, &data);
if (e) {
printf("fetch failed: %s\n", ntdb_errorstr(e));
}
}
- ntdb's ntdb_nextkey() frees the old key's dptr, in tdb you needed to do
this manually.
Example:
#include
#include
void tdb_example(struct tdb_context *tdb)
{
TDB_DATA key, next, data;
for (key = tdb_firstkey(tdb); key.dptr; key = next) {
printf("Got key!\n");
next = tdb_nextkey(tdb, key);
free(key.dptr);
}
}
void ntdb_example(struct ntdb_context *ntdb)
{
NTDB_DATA k, data;
enum NTDB_ERROR e;
for (e = ntdb_firstkey(ntdb,&k); !e; e = ntdb_nextkey(ntdb,&k))
printf("Got key!\n");
}
- Unlike tdb_open/tdb_open_ex, ntdb_open does not allow NULL names,
even for NTDB_INTERNAL dbs, and thus ntdb_name() never returns NULL.
Example:
#include
#include
struct tdb_context *tdb_example(void)
{
return tdb_open(NULL, 0, TDB_INTERNAL, O_RDWR, 0);
}
struct ntdb_context *ntdb_example(void)
{
return ntdb_open("example", NTDB_INTERNAL, O_RDWR, 0, NULL);
}
- ntdb uses a linked list of attribute structures to implement logging and
alternate hashes. tdb used tdb_open_ex, which was not extensible.
Example:
#include
#include
/* Custom hash function */
static unsigned int my_tdb_hash_func(TDB_DATA *key)
{
return key->dsize;
}
struct tdb_context *tdb_example(void)
{
return tdb_open_ex("example.tdb", 0, TDB_DEFAULT,
O_CREAT|O_RDWR, 0600, NULL, my_tdb_hash_func);
}
/* Custom hash function */
static unsigned int my_ntdb_hash_func(const void *key, size_t len,
uint32_t seed, void *data)
{
return len;
}
struct ntdb_context *ntdb_example(void)
{
union ntdb_attribute hash;
hash.base.attr = NTDB_ATTRIBUTE_HASH;
hash.base.next = NULL;
hash.hash.fn = my_ntdb_hash_func;
return ntdb_open("example.ntdb", NTDB_DEFAULT,
O_CREAT|O_RDWR, 0600, &hash);
}
- tdb's tdb_open/tdb_open_ex took an explicit hash size, defaulting to
131. ntdb's uses an attribute for this, defaulting to 8192.
Example:
#include
#include
struct tdb_context *tdb_example(void)
{
return tdb_open("example.tdb", 10007, TDB_DEFAULT,
O_CREAT|O_RDWR, 0600);
}
struct ntdb_context *ntdb_example(void)
{
union ntdb_attribute hashsize;
hashsize.base.attr = NTDB_ATTRIBUTE_HASHSIZE;
hashsize.base.next = NULL;
hashsize.hashsize.size = 16384;
return ntdb_open("example.ntdb", NTDB_DEFAULT,
O_CREAT|O_RDWR, 0600, &hashsize);
}
- ntdb's log function is simpler than tdb's log function. The string
is already formatted, is not terminated by a '\n', and it takes an
enum ntdb_log_level not a tdb_debug_level, and which has only three
values: NTDB_LOG_ERROR, NTDB_LOG_USE_ERROR and NTDB_LOG_WARNING.
#include
#include
static void tdb_log(struct tdb_context *tdb,
enum tdb_debug_level level, const char *fmt, ...)
{
va_list ap;
const char *name;
switch (level) {
case TDB_DEBUG_FATAL:
fprintf(stderr, "FATAL: ");
break;
case TDB_DEBUG_ERROR:
fprintf(stderr, "ERROR: ");
break;
case TDB_DEBUG_WARNING:
fprintf(stderr, "WARNING: ");
break;
case TDB_DEBUG_TRACE:
/* Don't print out tracing. */
return;
}
name = tdb_name(tdb);
if (!name) {
name = "unnamed";
}
fprintf(stderr, "tdb(%s):", name);
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
va_end(ap);
}
struct tdb_context *tdb_example(void)
{
struct tdb_logging_context lctx;
lctx.log_fn = tdb_log;
return tdb_open_ex("example.tdb", 0, TDB_DEFAULT,
O_CREAT|O_RDWR, 0600, &lctx, NULL);
}
static void ntdb_log(struct ntdb_context *ntdb,
enum ntdb_log_level level,
enum NTDB_ERROR ecode,
const char *message,
void *data)
{
switch (level) {
case NTDB_LOG_ERROR:
fprintf(stderr, "ERROR: ");
break;
case NTDB_LOG_USE_ERROR:
/* We made a mistake, so abort. */
abort();
break;
case NTDB_LOG_WARNING:
fprintf(stderr, "WARNING: ");
break;
}
fprintf(stderr, "ntdb(%s):%s:%s\n",
ntdb_name(ntdb), ntdb_errorstr(ecode), message);
}
struct ntdb_context *ntdb_example(void)
{
union ntdb_attribute log;
log.base.attr = NTDB_ATTRIBUTE_LOG;
log.base.next = NULL;
log.log.fn = ntdb_log;
return ntdb_open("example.ntdb", NTDB_DEFAULT,
O_CREAT|O_RDWR, 0600, &log);
}
- ntdb provides ntdb_deq() for comparing two NTDB_DATA, and ntdb_mkdata() for
creating an NTDB_DATA.
#include
#include
void tdb_example(struct tdb_context *tdb)
{
TDB_DATA data, key;
key.dsize = strlen("hello");
key.dptr = "hello";
data = tdb_fetch(tdb, key);
if (data.dsize == key.dsize
&& !memcmp(data.dptr, key.dptr, key.dsize))
printf("key is same as data\n");
free(data.dptr);
}
void ntdb_example(struct ntdb_context *ntdb)
{
NTDB_DATA data, key;
key = ntdb_mkdata("hello", strlen("hello"));
if (ntdb_fetch(ntdb, key, &data) == NTDB_SUCCESS) {
if (ntdb_deq(key, data)) {
printf("key is same as data\n");
}
free(data.dptr);
}
}
- ntdb's ntdb_parse_record() takes a type-checked callback data
pointer, not a void * (though a void * pointer still works). The
callback function is allowed to do read operations on the database,
or write operations if you first call ntdb_lockall(). TDB's
tdb_parse_record() did not allow any database access within the
callback, could crash if you tried.
Example:
#include
#include
static int tdb_parser(TDB_DATA key, TDB_DATA data, void *private_data)
{
TDB_DATA *expect = private_data;
return data.dsize == expect->dsize
&& !memcmp(data.dptr, expect->dptr, data.dsize);
}
void tdb_example(struct tdb_context *tdb, TDB_DATA key, NTDB_DATA d)
{
switch (tdb_parse_record(tdb, key, tdb_parser, &d)) {
case -1:
printf("parse failed: %s\n", tdb_errorstr(tdb));
break;
case 0:
printf("data was different!\n");
break;
case 1:
printf("data was same!\n");
break;
}
}
static int ntdb_parser(NTDB_DATA key, NTDB_DATA data, NTDB_DATA *expect)
{
return ntdb_deq(data, *expect);
}
void ntdb_example(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA d)
{
enum NTDB_ERROR e;
e = ntdb_parse_record(ntdb, key, ntdb_parser, &d);
switch (e) {
case 0:
printf("data was different!\n");
break;
case 1:
printf("data was same!\n");
break;
default:
printf("parse failed: %s\n", ntdb_errorstr(e));
break;
}
}
- ntdb does locking on read-only databases (ie. O_RDONLY passed to ntdb_open).
tdb did not: use the NTDB_NOLOCK flag if you want to suppress locking.
Example:
#include
#include
struct tdb_context *tdb_example(void)
{
return tdb_open("example.tdb", 0, TDB_DEFAULT, O_RDONLY, 0);
}
struct ntdb_context *ntdb_example(void)
{
return ntdb_open("example.ntdb", NTDB_NOLOCK, O_RDONLY, 0, NULL);
}
- Failure inside a transaction (such as a lock function failing) does
not implicitly cancel the transaction; you still need to call
ntdb_transaction_cancel().
#include
#include
void tdb_example(struct tdb_context *tdb, TDB_DATA key, TDB_DATA d)
{
if (tdb_transaction_start(tdb) == -1) {
printf("transaction failed: %s\n", tdb_errorstr(tdb));
return;
}
if (tdb_store(tdb, key, d) == -1) {
printf("store failed: %s\n", tdb_errorstr(tdb));
return;
}
if (tdb_transaction_commit(tdb) == -1) {
printf("commit failed: %s\n", tdb_errorstr(tdb));
}
}
void ntdb_example(struct ntdb_context *ntdb, NTDB_DATA key, NTDB_DATA d)
{
enum NTDB_ERROR e;
e = ntdb_transaction_start(ntdb);
if (e) {
printf("transaction failed: %s\n", ntdb_errorstr(e));
return;
}
e = ntdb_store(ntdb, key, d);
if (e) {
printf("store failed: %s\n", ntdb_errorstr(e));
ntdb_transaction_cancel(ntdb);
return;
}
e = ntdb_transaction_commit(ntdb);
if (e) {
printf("commit failed: %s\n", ntdb_errorstr(e));
}
}
- There is no NTDB_CLEAR_IF_FIRST flag; it has severe scalability and
API problems. If necessary, you can emulate this by using the open
hook and placing a 1-byte lock at offset 4. If your program forks
and exits, you will need to place this lock again in the child before
the parent exits.
Example:
#include
#include
struct tdb_context *tdb_example(void)
{
return tdb_open("example.tdb", 0, TDB_CLEAR_IF_FIRST,
O_CREAT|O_RDWR, 0600);
}
static enum NTDB_ERROR clear_if_first(int fd, void *unused)
{
/* We hold a lock offset 4 always, so we can tell if
* anyone else is. */
struct flock fl;
fl.l_type = F_WRLCK;
fl.l_whence = SEEK_SET;
fl.l_start = 4; /* ACTIVE_LOCK */
fl.l_len = 1;
if (fcntl(fd, F_SETLK, &fl) == 0) {
/* We must be first ones to open it! Clear it. */
if (ftruncate(fd, 0) != 0) {
return NTDB_ERR_IO;
}
}
fl.l_type = F_RDLCK;
if (fcntl(fd, F_SETLKW, &fl) != 0) {
return NTDB_ERR_IO;
}
return NTDB_SUCCESS;
}
struct ntdb_context *ntdb_example(void)
{
union ntdb_attribute open_attr;
open_attr.openhook.base.attr = NTDB_ATTRIBUTE_OPENHOOK;
open_attr.openhook.base.next = NULL;
open_attr.openhook.fn = clear_if_first;
return ntdb_open("example.ntdb", NTDB_DEFAULT,
O_CREAT|O_RDWR, 0600, &open_attr);
}
- ntdb traversals are not reliable if the database is changed during
the traversal, ie your traversal may not cover all elements, or may
cover elements multiple times. As a special exception, deleting the
current record within ntdb_traverse() is reliable.
- There is no ntdb_traverse_read, since ntdb_traverse does not hold
a lock across the entire traversal anyway. If you want to make sure
that your traversal function does not write to the database, you can
set and clear the NTDB_RDONLY flag around the traversal.
- ntdb does not need tdb_reopen() or tdb_reopen_all(). If you call
fork() after during certain operations the child should close the
ntdb, or complete the operations before continuing to use the tdb:
ntdb_transaction_start(): child must ntdb_transaction_cancel()
ntdb_lockall(): child must call ntdb_unlockall()
ntdb_lockall_read(): child must call ntdb_unlockall_read()
ntdb_chainlock(): child must call ntdb_chainunlock()
ntdb_parse() callback: child must return from ntdb_parse()
- ntdb will not open a non-ntdb file, even if O_CREAT is specified. tdb
will overwrite an unknown file in that case.
ntdb-1.0/doc/design.lyx 0000664 0000000 0000000 00000203407 12241515307 0015063 0 ustar 00root root 0000000 0000000 #LyX 2.0 created this file. For more info see http://www.lyx.org/
\lyxformat 413
\begin_document
\begin_header
\textclass article
\use_default_options true
\maintain_unincluded_children false
\language english
\language_package default
\inputencoding auto
\fontencoding global
\font_roman default
\font_sans default
\font_typewriter default
\font_default_family default
\use_non_tex_fonts false
\font_sc false
\font_osf false
\font_sf_scale 100
\font_tt_scale 100
\graphics default
\default_output_format default
\output_sync 0
\bibtex_command default
\index_command default
\paperfontsize default
\use_hyperref false
\papersize default
\use_geometry false
\use_amsmath 1
\use_esint 1
\use_mhchem 1
\use_mathdots 1
\cite_engine basic
\use_bibtopic false
\use_indices false
\paperorientation portrait
\suppress_date false
\use_refstyle 0
\index Index
\shortcut idx
\color #008000
\end_index
\secnumdepth 3
\tocdepth 3
\paragraph_separation indent
\paragraph_indentation default
\quotes_language english
\papercolumns 1
\papersides 1
\paperpagestyle default
\tracking_changes true
\output_changes true
\html_math_output 0
\html_css_as_file 0
\html_be_strict false
\end_header
\begin_body
\begin_layout Title
NTDB: Redesigning The Trivial DataBase
\end_layout
\begin_layout Author
Rusty Russell, IBM Corporation
\end_layout
\begin_layout Date
19 June 2012
\end_layout
\begin_layout Abstract
The Trivial DataBase on-disk format is 32 bits; with usage cases heading
towards the 4G limit, that must change.
This required breakage provides an opportunity to revisit TDB's other design
decisions and reassess them.
\end_layout
\begin_layout Section
Introduction
\end_layout
\begin_layout Standard
The Trivial DataBase was originally written by Andrew Tridgell as a simple
key/data pair storage system with the same API as dbm, but allowing multiple
readers and writers while being small enough (< 1000 lines of C) to include
in SAMBA.
The simple design created in 1999 has proven surprisingly robust and performant
, used in Samba versions 3 and 4 as well as numerous other projects.
Its useful life was greatly increased by the (backwards-compatible!) addition
of transaction support in 2005.
\end_layout
\begin_layout Standard
The wider variety and greater demands of TDB-using code have led to some
organic growth of the API, as well as some compromises on the implementation.
None of these, by themselves, are seen as show-stoppers, but the cumulative
effect is a loss of elegance over the initial, simple TDB implementation.
Here is a table of the approximate number of lines of implementation code
and number of API functions at the end of each year:
\end_layout
\begin_layout Standard
\begin_inset Tabular
\begin_inset Text
\begin_layout Plain Layout
Year End
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
API Functions
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
Lines of C Code Implementation
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
1999
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
13
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
1195
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2000
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
24
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
1725
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2001
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
32
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2228
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2002
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
35
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2481
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2003
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
35
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2552
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2004
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
40
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2584
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2005
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
38
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2647
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2006
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
52
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
3754
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2007
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
66
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
4398
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2008
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
71
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
4768
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
2009
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
73
\end_layout
\end_inset
|
\begin_inset Text
\begin_layout Plain Layout
5715
\end_layout
\end_inset
|
\end_inset
\end_layout
\begin_layout Standard
This review is an attempt to catalog and address all the known issues with
TDB and create solutions which address the problems without significantly
increasing complexity; all involved are far too aware of the dangers of
second system syndrome in rewriting a successful project like this.
\end_layout
\begin_layout Standard
Note: the final decision was to make ntdb a separate library, with a separate
'ntdb' namespace so both can potentially be linked together.
This document still refers to
\begin_inset Quotes eld
\end_inset
tdb
\begin_inset Quotes erd
\end_inset
everywhere, for simplicity.
\end_layout
\begin_layout Section
API Issues
\end_layout
\begin_layout Subsection
tdb_open_ex Is Not Expandable
\end_layout
\begin_layout Standard
The tdb_open() call was expanded to tdb_open_ex(), which added an optional
hashing function and an optional logging function argument.
Additional arguments to open would require the introduction of a tdb_open_ex2
call etc.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\begin_inset CommandInset label
LatexCommand label
name "attributes"
\end_inset
\end_layout
\begin_layout Standard
tdb_open() will take a linked-list of attributes:
\end_layout
\begin_layout LyX-Code
enum tdb_attribute {
\end_layout
\begin_layout LyX-Code
TDB_ATTRIBUTE_LOG = 0,
\end_layout
\begin_layout LyX-Code
TDB_ATTRIBUTE_HASH = 1
\end_layout
\begin_layout LyX-Code
};
\end_layout
\begin_layout LyX-Code
struct tdb_attribute_base {
\end_layout
\begin_layout LyX-Code
enum tdb_attribute attr;
\end_layout
\begin_layout LyX-Code
union tdb_attribute *next;
\end_layout
\begin_layout LyX-Code
};
\end_layout
\begin_layout LyX-Code
struct tdb_attribute_log {
\end_layout
\begin_layout LyX-Code
struct tdb_attribute_base base; /* .attr = TDB_ATTRIBUTE_LOG */
\end_layout
\begin_layout LyX-Code
tdb_log_func log_fn;
\end_layout
\begin_layout LyX-Code
void *log_private;
\end_layout
\begin_layout LyX-Code
};
\end_layout
\begin_layout LyX-Code
struct tdb_attribute_hash {
\end_layout
\begin_layout LyX-Code
struct tdb_attribute_base base; /* .attr = TDB_ATTRIBUTE_HASH */
\end_layout
\begin_layout LyX-Code
tdb_hash_func hash_fn;
\end_layout
\begin_layout LyX-Code
void *hash_private;
\end_layout
\begin_layout LyX-Code
};
\end_layout
\begin_layout LyX-Code
union tdb_attribute {
\end_layout
\begin_layout LyX-Code
struct tdb_attribute_base base;
\end_layout
\begin_layout LyX-Code
struct tdb_attribute_log log;
\end_layout
\begin_layout LyX-Code
struct tdb_attribute_hash hash;
\end_layout
\begin_layout LyX-Code
};
\end_layout
\begin_layout Standard
This allows future attributes to be added, even if this expands the size
of the union.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
tdb_traverse Makes Impossible Guarantees
\end_layout
\begin_layout Standard
tdb_traverse (and tdb_firstkey/tdb_nextkey) predate transactions, and it
was thought that it was important to guarantee that all records which exist
at the start and end of the traversal would be included, and no record
would be included twice.
\end_layout
\begin_layout Standard
This adds complexity (see
\begin_inset CommandInset ref
LatexCommand ref
reference "Reliable-Traversal-Adds"
\end_inset
) and does not work anyway for records which are altered (in particular,
those which are expanded may be effectively deleted and re-added behind
the traversal).
\end_layout
\begin_layout Subsubsection
\begin_inset CommandInset label
LatexCommand label
name "traverse-Proposed-Solution"
\end_inset
Proposed Solution
\end_layout
\begin_layout Standard
Abandon the guarantee.
You will see every record if no changes occur during your traversal, otherwise
you will see some subset.
You can prevent changes by using a transaction or the locking API.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
Delete-during-traverse will still delete every record, too (assuming no
other changes).
\end_layout
\begin_layout Subsection
Nesting of Transactions Is Fraught
\end_layout
\begin_layout Standard
TDB has alternated between allowing nested transactions and not allowing
them.
Various paths in the Samba codebase assume that transactions will nest,
and in a sense they can: the operation is only committed to disk when the
outer transaction is committed.
There are two problems, however:
\end_layout
\begin_layout Enumerate
Canceling the inner transaction will cause the outer transaction commit
to fail, and will not undo any operations since the inner transaction began.
This problem is soluble with some additional internal code.
\end_layout
\begin_layout Enumerate
An inner transaction commit can be cancelled by the outer transaction.
This is desirable in the way which Samba's database initialization code
uses transactions, but could be a surprise to any users expecting a successful
transaction commit to expose changes to others.
\end_layout
\begin_layout Standard
The current solution is to specify the behavior at tdb_open(), with the
default currently that nested transactions are allowed.
This flag can also be changed at runtime.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
Given the usage patterns, it seems that the
\begin_inset Quotes eld
\end_inset
least-surprise
\begin_inset Quotes erd
\end_inset
behavior of disallowing nested transactions should become the default.
Additionally, it seems the outer transaction is the only code which knows
whether inner transactions should be allowed, so a flag to indicate this
could be added to tdb_transaction_start.
However, this behavior can be simulated with a wrapper which uses tdb_add_flags
() and tdb_remove_flags(), so the API should not be expanded for this relatively
-obscure case.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete; the nesting flag has been removed.
\end_layout
\begin_layout Subsection
Incorrect Hash Function is Not Detected
\end_layout
\begin_layout Standard
tdb_open_ex() allows the calling code to specify a different hash function
to use, but does not check that all other processes accessing this tdb
are using the same hash function.
The result is that records are missing from tdb_fetch().
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
The header should contain an example hash result (eg.
the hash of 0xdeadbeef), and tdb_open_ex() should check that the given
hash function produces the same answer, or fail the tdb_open call.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
tdb_set_max_dead/TDB_VOLATILE Expose Implementation
\end_layout
\begin_layout Standard
In response to scalability issues with the free list (
\begin_inset CommandInset ref
LatexCommand ref
reference "TDB-Freelist-Is"
\end_inset
) two API workarounds have been incorporated in TDB: tdb_set_max_dead()
and the TDB_VOLATILE flag to tdb_open.
The latter actually calls the former with an argument of
\begin_inset Quotes eld
\end_inset
5
\begin_inset Quotes erd
\end_inset
.
\end_layout
\begin_layout Standard
This code allows deleted records to accumulate without putting them in the
free list.
On delete we iterate through each chain and free them in a batch if there
are more than max_dead entries.
These are never otherwise recycled except as a side-effect of a tdb_repack.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
With the scalability problems of the freelist solved, this API can be removed.
The TDB_VOLATILE flag may still be useful as a hint that store and delete
of records will be at least as common as fetch in order to allow some internal
tuning, but initially will become a no-op.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
Unknown flags cause tdb_open() to fail as well, so they can be detected
at runtime.
\end_layout
\begin_layout Subsection
\begin_inset CommandInset label
LatexCommand label
name "TDB-Files-Cannot"
\end_inset
TDB Files Cannot Be Opened Multiple Times In The Same Process
\end_layout
\begin_layout Standard
No process can open the same TDB twice; we check and disallow it.
This is an unfortunate side-effect of fcntl locks, which operate on a per-file
rather than per-file-descriptor basis, and do not nest.
Thus, closing any file descriptor on a file clears all the locks obtained
by this process, even if they were placed using a different file descriptor!
\end_layout
\begin_layout Standard
Note that even if this were solved, deadlock could occur if operations were
nested: this is a more manageable programming error in most cases.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
We could lobby POSIX to fix the perverse rules, or at least lobby Linux
to violate them so that the most common implementation does not have this
restriction.
This would be a generally good idea for other fcntl lock users.
\end_layout
\begin_layout Standard
Samba uses a wrapper which hands out the same tdb_context to multiple callers
if this happens, and does simple reference counting.
We should do this inside the tdb library, which already emulates lock nesting
internally; it would need to recognize when deadlock occurs within a single
process.
This would create a new failure mode for tdb operations (while we currently
handle locking failures, they are impossible in normal use and a process
encountering them can do little but give up).
\end_layout
\begin_layout Standard
I do not see benefit in an additional tdb_open flag to indicate whether
re-opening is allowed, as though there may be some benefit to adding a
call to detect when a tdb_context is shared, to allow other to create such
an API.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
TDB API Is Not POSIX Thread-safe
\end_layout
\begin_layout Standard
The TDB API uses an error code which can be queried after an operation to
determine what went wrong.
This programming model does not work with threads, unless specific additional
guarantees are given by the implementation.
In addition, even otherwise-independent threads cannot open the same TDB
(as in
\begin_inset CommandInset ref
LatexCommand ref
reference "TDB-Files-Cannot"
\end_inset
).
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
Rearchitecting the API to include a tdb_errcode pointer would be a great
deal of churn, but fortunately most functions return 0 on success and -1
on error: we can change these to return 0 on success and a negative error
code on error, and the API remains similar to previous.
The tdb_fetch, tdb_firstkey and tdb_nextkey functions need to take a TDB_DATA
pointer and return an error code.
It is also simpler to have tdb_nextkey replace its key argument in place,
freeing up any old .dptr.
\end_layout
\begin_layout Standard
Internal locking is required to make sure that fcntl locks do not overlap
between threads, and also that the global list of tdbs is maintained.
\end_layout
\begin_layout Standard
The aim is that building tdb with -DTDB_PTHREAD will result in a pthread-safe
version of the library, and otherwise no overhead will exist.
Alternatively, a hooking mechanism similar to that proposed for
\begin_inset CommandInset ref
LatexCommand ref
reference "Proposed-Solution-locking-hook"
\end_inset
could be used to enable pthread locking at runtime.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Incomplete; API has been changed but thread safety has not been implemented.
\end_layout
\begin_layout Subsection
*_nonblock Functions And *_mark Functions Expose Implementation
\end_layout
\begin_layout Standard
CTDB
\begin_inset Foot
status collapsed
\begin_layout Plain Layout
Clustered TDB, see http://ctdb.samba.org
\end_layout
\end_inset
wishes to operate on TDB in a non-blocking manner.
This is currently done as follows:
\end_layout
\begin_layout Enumerate
Call the _nonblock variant of an API function (eg.
tdb_lockall_nonblock).
If this fails:
\end_layout
\begin_layout Enumerate
Fork a child process, and wait for it to call the normal variant (eg.
tdb_lockall).
\end_layout
\begin_layout Enumerate
If the child succeeds, call the _mark variant to indicate we already have
the locks (eg.
tdb_lockall_mark).
\end_layout
\begin_layout Enumerate
Upon completion, tell the child to release the locks (eg.
tdb_unlockall).
\end_layout
\begin_layout Enumerate
Indicate to tdb that it should consider the locks removed (eg.
tdb_unlockall_mark).
\end_layout
\begin_layout Standard
There are several issues with this approach.
Firstly, adding two new variants of each function clutters the API for
an obscure use, and so not all functions have three variants.
Secondly, it assumes that all paths of the functions ask for the same locks,
otherwise the parent process will have to get a lock which the child doesn't
have under some circumstances.
I don't believe this is currently the case, but it constrains the implementatio
n.
\end_layout
\begin_layout Subsubsection
\begin_inset CommandInset label
LatexCommand label
name "Proposed-Solution-locking-hook"
\end_inset
Proposed Solution
\end_layout
\begin_layout Standard
Implement a hook for locking methods, so that the caller can control the
calls to create and remove fcntl locks.
In this scenario, ctdbd would operate as follows:
\end_layout
\begin_layout Enumerate
Call the normal API function, eg tdb_lockall().
\end_layout
\begin_layout Enumerate
When the lock callback comes in, check if the child has the lock.
Initially, this is always false.
If so, return 0.
Otherwise, try to obtain it in non-blocking mode.
If that fails, return EWOULDBLOCK.
\end_layout
\begin_layout Enumerate
Release locks in the unlock callback as normal.
\end_layout
\begin_layout Enumerate
If tdb_lockall() fails, see if we recorded a lock failure; if so, call the
child to repeat the operation.
\end_layout
\begin_layout Enumerate
The child records what locks it obtains, and returns that information to
the parent.
\end_layout
\begin_layout Enumerate
When the child has succeeded, goto 1.
\end_layout
\begin_layout Standard
This is flexible enough to handle any potential locking scenario, even when
lock requirements change.
It can be optimized so that the parent does not release locks, just tells
the child which locks it doesn't need to obtain.
\end_layout
\begin_layout Standard
It also keeps the complexity out of the API, and in ctdbd where it is needed.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
tdb_chainlock Functions Expose Implementation
\end_layout
\begin_layout Standard
tdb_chainlock locks some number of records, including the record indicated
by the given key.
This gave atomicity guarantees; no-one can start a transaction, alter,
read or delete that key while the lock is held.
\end_layout
\begin_layout Standard
It also makes the same guarantee for any other key in the chain, which is
an internal implementation detail and potentially a cause for deadlock.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
None.
It would be nice to have an explicit single entry lock which affected no
other keys.
Unfortunately, this won't work for an entry which doesn't exist.
Thus while chainlock may be implemented more efficiently for the existing
case, it will still have overlap issues with the non-existing case.
So it is best to keep the current (lack of) guarantee about which records
will be affected to avoid constraining our implementation.
\end_layout
\begin_layout Subsection
Signal Handling is Not Race-Free
\end_layout
\begin_layout Standard
The tdb_setalarm_sigptr() call allows the caller's signal handler to indicate
that the tdb locking code should return with a failure, rather than trying
again when a signal is received (and errno == EAGAIN).
This is usually used to implement timeouts.
\end_layout
\begin_layout Standard
Unfortunately, this does not work in the case where the signal is received
before the tdb code enters the fcntl() call to place the lock: the code
will sleep within the fcntl() code, unaware that the signal wants it to
exit.
In the case of long timeouts, this does not happen in practice.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
The locking hooks proposed in
\begin_inset CommandInset ref
LatexCommand ref
reference "Proposed-Solution-locking-hook"
\end_inset
would allow the user to decide on whether to fail the lock acquisition
on a signal.
This allows the caller to choose their own compromise: they could narrow
the race by checking immediately before the fcntl call.
\begin_inset Foot
status collapsed
\begin_layout Plain Layout
It may be possible to make this race-free in some implementations by having
the signal handler alter the struct flock to make it invalid.
This will cause the fcntl() lock call to fail with EINVAL if the signal
occurs before the kernel is entered, otherwise EAGAIN.
\end_layout
\end_inset
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
The API Uses Gratuitous Typedefs, Capitals
\end_layout
\begin_layout Standard
typedefs are useful for providing source compatibility when types can differ
across implementations, or arguably in the case of function pointer definitions
which are hard for humans to parse.
Otherwise it is simply obfuscation and pollutes the namespace.
\end_layout
\begin_layout Standard
Capitalization is usually reserved for compile-time constants and macros.
\end_layout
\begin_layout Description
TDB_CONTEXT There is no reason to use this over 'struct tdb_context'; the
definition isn't visible to the API user anyway.
\end_layout
\begin_layout Description
TDB_DATA There is no reason to use this over struct TDB_DATA; the struct
needs to be understood by the API user.
\end_layout
\begin_layout Description
struct
\begin_inset space ~
\end_inset
TDB_DATA This would normally be called 'struct tdb_data'.
\end_layout
\begin_layout Description
enum
\begin_inset space ~
\end_inset
TDB_ERROR Similarly, this would normally be enum tdb_error.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
None.
Introducing lower case variants would please pedants like myself, but if
it were done the existing ones should be kept.
There is little point forcing a purely cosmetic change upon tdb users.
\end_layout
\begin_layout Subsection
\begin_inset CommandInset label
LatexCommand label
name "tdb_log_func-Doesnt-Take"
\end_inset
tdb_log_func Doesn't Take The Private Pointer
\end_layout
\begin_layout Standard
For API compatibility reasons, the logging function needs to call tdb_get_loggin
g_private() to retrieve the pointer registered by the tdb_open_ex for logging.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
It should simply take an extra argument, since we are prepared to break
the API/ABI.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
Various Callback Functions Are Not Typesafe
\end_layout
\begin_layout Standard
The callback functions in tdb_set_logging_function (after
\begin_inset CommandInset ref
LatexCommand ref
reference "tdb_log_func-Doesnt-Take"
\end_inset
is resolved), tdb_parse_record, tdb_traverse, tdb_traverse_read and tdb_check
all take void * and must internally convert it to the argument type they
were expecting.
\end_layout
\begin_layout Standard
If this type changes, the compiler will not produce warnings on the callers,
since it only sees void *.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
With careful use of macros, we can create callback functions which give
a warning when used on gcc and the types of the callback and its private
argument differ.
Unsupported compilers will not give a warning, which is no worse than now.
In addition, the callbacks become clearer, as they need not use void *
for their parameter.
\end_layout
\begin_layout Standard
See CCAN's typesafe_cb module at http://ccan.ozlabs.org/info/typesafe_cb.html
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
TDB_CLEAR_IF_FIRST Must Be Specified On All Opens, tdb_reopen_all Problematic
\end_layout
\begin_layout Standard
The TDB_CLEAR_IF_FIRST flag to tdb_open indicates that the TDB file should
be cleared if the caller discovers it is the only process with the TDB
open.
However, if any caller does not specify TDB_CLEAR_IF_FIRST it will not
be detected, so will have the TDB erased underneath them (usually resulting
in a crash).
\end_layout
\begin_layout Standard
There is a similar issue on fork(); if the parent exits (or otherwise closes
the tdb) before the child calls tdb_reopen_all() to establish the lock
used to indicate the TDB is opened by someone, a TDB_CLEAR_IF_FIRST opener
at that moment will believe it alone has opened the TDB and will erase
it.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
Remove TDB_CLEAR_IF_FIRST.
Other workarounds are possible, but see
\begin_inset CommandInset ref
LatexCommand ref
reference "TDB_CLEAR_IF_FIRST-Imposes-Performance"
\end_inset
.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
An open hook is provided to replicate this functionality if required.
\end_layout
\begin_layout Subsection
Extending The Header Is Difficult
\end_layout
\begin_layout Standard
We have reserved (zeroed) words in the TDB header, which can be used for
future features.
If the future features are compulsory, the version number must be updated
to prevent old code from accessing the database.
But if the future feature is optional, we have no way of telling if older
code is accessing the database or not.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
The header should contain a
\begin_inset Quotes eld
\end_inset
format variant
\begin_inset Quotes erd
\end_inset
value (64-bit).
This is divided into two 32-bit parts:
\end_layout
\begin_layout Enumerate
The lower part reflects the format variant understood by code accessing
the database.
\end_layout
\begin_layout Enumerate
The upper part reflects the format variant you must understand to write
to the database (otherwise you can only open for reading).
\end_layout
\begin_layout Standard
The latter field can only be written at creation time, the former should
be written under the OPEN_LOCK when opening the database for writing, if
the variant of the code is lower than the current lowest variant.
\end_layout
\begin_layout Standard
This should allow backwards-compatible features to be added, and detection
if older code (which doesn't understand the feature) writes to the database.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
Record Headers Are Not Expandible
\end_layout
\begin_layout Standard
If we later want to add (say) checksums on keys and data, it would require
another format change, which we'd like to avoid.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
We often have extra padding at the tail of a record.
If we ensure that the first byte (if any) of this padding is zero, we will
have a way for future changes to detect code which doesn't understand a
new format: the new code would write (say) a 1 at the tail, and thus if
there is no tail or the first byte is 0, we would know the extension is
not present on that record.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
TDB Does Not Use Talloc
\end_layout
\begin_layout Standard
Many users of TDB (particularly Samba) use the talloc allocator, and thus
have to wrap TDB in a talloc context to use it conveniently.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
The allocation within TDB is not complicated enough to justify the use of
talloc, and I am reluctant to force another (excellent) library on TDB
users.
Nonetheless a compromise is possible.
An attribute (see
\begin_inset CommandInset ref
LatexCommand ref
reference "attributes"
\end_inset
) can be added later to tdb_open() to provide an alternate allocation mechanism,
specifically for talloc but usable by any other allocator (which would
ignore the
\begin_inset Quotes eld
\end_inset
context
\begin_inset Quotes erd
\end_inset
argument).
\end_layout
\begin_layout Standard
This would form a talloc hierarchy as expected, but the caller would still
have to attach a destructor to the tdb context returned from tdb_open to
close it.
All TDB_DATA fields would be children of the tdb_context, and the caller
would still have to manage them (using talloc_free() or talloc_steal()).
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete, using the NTDB_ATTRIBUTE_ALLOCATOR attribute.
\end_layout
\begin_layout Section
Performance And Scalability Issues
\end_layout
\begin_layout Subsection
\begin_inset CommandInset label
LatexCommand label
name "TDB_CLEAR_IF_FIRST-Imposes-Performance"
\end_inset
TDB_CLEAR_IF_FIRST Imposes Performance Penalty
\end_layout
\begin_layout Standard
When TDB_CLEAR_IF_FIRST is specified, a 1-byte read lock is placed at offset
4 (aka.
the ACTIVE_LOCK).
While these locks never conflict in normal tdb usage, they do add substantial
overhead for most fcntl lock implementations when the kernel scans to detect
if a lock conflict exists.
This is often a single linked list, making the time to acquire and release
a fcntl lock O(N) where N is the number of processes with the TDB open,
not the number actually doing work.
\end_layout
\begin_layout Standard
In a Samba server it is common to have huge numbers of clients sitting idle,
and thus they have weaned themselves off the TDB_CLEAR_IF_FIRST flag.
\begin_inset Foot
status collapsed
\begin_layout Plain Layout
There is a flag to tdb_reopen_all() which is used for this optimization:
if the parent process will outlive the child, the child does not need the
ACTIVE_LOCK.
This is a workaround for this very performance issue.
\end_layout
\end_inset
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
Remove the flag.
It was a neat idea, but even trivial servers tend to know when they are
initializing for the first time and can simply unlink the old tdb at that
point.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
TDB Files Have a 4G Limit
\end_layout
\begin_layout Standard
This seems to be becoming an issue (so much for
\begin_inset Quotes eld
\end_inset
trivial
\begin_inset Quotes erd
\end_inset
!), particularly for ldb.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
A new, incompatible TDB format which uses 64 bit offsets internally rather
than 32 bit as now.
For simplicity of endian conversion (which TDB does on the fly if required),
all values will be 64 bit on disk.
In practice, some upper bits may be used for other purposes, but at least
56 bits will be available for file offsets.
\end_layout
\begin_layout Standard
tdb_open() will automatically detect the old version, and even create them
if TDB_VERSION6 is specified to tdb_open.
\end_layout
\begin_layout Standard
32 bit processes will still be able to access TDBs larger than 4G (assuming
that their off_t allows them to seek to 64 bits), they will gracefully
fall back as they fail to mmap.
This can happen already with large TDBs.
\end_layout
\begin_layout Standard
Old versions of tdb will fail to open the new TDB files (since 28 August
2009, commit 398d0c29290: prior to that any unrecognized file format would
be erased and initialized as a fresh tdb!)
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
TDB Records Have a 4G Limit
\end_layout
\begin_layout Standard
This has not been a reported problem, and the API uses size_t which can
be 64 bit on 64 bit platforms.
However, other limits may have made such an issue moot.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
Record sizes will be 64 bit, with an error returned on 32 bit platforms
which try to access such records (the current implementation would return
TDB_ERR_OOM in a similar case).
It seems unlikely that 32 bit keys will be a limitation, so the implementation
may not support this (see
\begin_inset CommandInset ref
LatexCommand ref
reference "sub:Records-Incur-A"
\end_inset
).
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
Hash Size Is Determined At TDB Creation Time
\end_layout
\begin_layout Standard
TDB contains a number of hash chains in the header; the number is specified
at creation time, and defaults to 131.
This is such a bottleneck on large databases (as each hash chain gets quite
long), that LDB uses 10,000 for this hash.
In general it is impossible to know what the 'right' answer is at database
creation time.
\end_layout
\begin_layout Subsubsection
\begin_inset CommandInset label
LatexCommand label
name "sub:Hash-Size-Solution"
\end_inset
Proposed Solution
\end_layout
\begin_layout Standard
After comprehensive performance testing on various scalable hash variants
\begin_inset Foot
status collapsed
\begin_layout Plain Layout
http://rusty.ozlabs.org/?p=89 and http://rusty.ozlabs.org/?p=94 This was annoying
because I was previously convinced that an expanding tree of hashes would
be very close to optimal.
\end_layout
\end_inset
, it became clear that it is hard to beat a straight linear hash table which
doubles in size when it reaches saturation.
Unfortunately, altering the hash table introduces serious locking complications
: the entire hash table needs to be locked to enlarge the hash table, and
others might be holding locks.
Particularly insidious are insertions done under tdb_chainlock.
\end_layout
\begin_layout Standard
Thus an expanding layered hash will be used: an array of hash groups, with
each hash group exploding into pointers to lower hash groups once it fills,
turning into a hash tree.
This has implications for locking: we must lock the entire group in case
we need to expand it, yet we don't know how deep the tree is at that point.
\end_layout
\begin_layout Standard
Note that bits from the hash table entries should be stolen to hold more
hash bits to reduce the penalty of collisions.
We can use the otherwise-unused lower 3 bits.
If we limit the size of the database to 64 exabytes, we can use the top
8 bits of the hash entry as well.
These 11 bits would reduce false positives down to 1 in 2000 which is more
than we need: we can use one of the bits to indicate that the extra hash
bits are valid.
This means we can choose not to re-hash all entries when we expand a hash
group; simply use the next bits we need and mark them invalid.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Ignore.
Scaling the hash automatically proved inefficient at small hash sizes;
we default to an 8192-element hash (changeable via NTDB_ATTRIBUTE_HASHSIZE),
and when buckets clash we expand to an array of hash entries.
This scales slightly better than the tdb chain (due to the 8 top bits containin
g extra hash).
\end_layout
\begin_layout Subsection
\begin_inset CommandInset label
LatexCommand label
name "TDB-Freelist-Is"
\end_inset
TDB Freelist Is Highly Contended
\end_layout
\begin_layout Standard
TDB uses a single linked list for the free list.
Allocation occurs as follows, using heuristics which have evolved over
time:
\end_layout
\begin_layout Enumerate
Get the free list lock for this whole operation.
\end_layout
\begin_layout Enumerate
Multiply length by 1.25, so we always over-allocate by 25%.
\end_layout
\begin_layout Enumerate
Set the slack multiplier to 1.
\end_layout
\begin_layout Enumerate
Examine the current freelist entry: if it is > length but < the current
best case, remember it as the best case.
\end_layout
\begin_layout Enumerate
Multiply the slack multiplier by 1.05.
\end_layout
\begin_layout Enumerate
If our best fit so far is less than length * slack multiplier, return it.
The slack will be turned into a new free record if it's large enough.
\end_layout
\begin_layout Enumerate
Otherwise, go onto the next freelist entry.
\end_layout
\begin_layout Standard
Deleting a record occurs as follows:
\end_layout
\begin_layout Enumerate
Lock the hash chain for this whole operation.
\end_layout
\begin_layout Enumerate
Walk the chain to find the record, keeping the prev pointer offset.
\end_layout
\begin_layout Enumerate
If max_dead is non-zero:
\end_layout
\begin_deeper
\begin_layout Enumerate
Walk the hash chain again and count the dead records.
\end_layout
\begin_layout Enumerate
If it's more than max_dead, bulk free all the dead ones (similar to steps
4 and below, but the lock is only obtained once).
\end_layout
\begin_layout Enumerate
Simply mark this record as dead and return.
\end_layout
\end_deeper
\begin_layout Enumerate
Get the free list lock for the remainder of this operation.
\end_layout
\begin_layout Enumerate
\begin_inset CommandInset label
LatexCommand label
name "right-merging"
\end_inset
Examine the following block to see if it is free; if so, enlarge the current
block and remove that block from the free list.
This was disabled, as removal from the free list was O(entries-in-free-list).
\end_layout
\begin_layout Enumerate
Examine the preceding block to see if it is free: for this reason, each
block has a 32-bit tailer which indicates its length.
If it is free, expand it to cover our new block and return.
\end_layout
\begin_layout Enumerate
Otherwise, prepend ourselves to the free list.
\end_layout
\begin_layout Standard
Disabling right-merging (step
\begin_inset CommandInset ref
LatexCommand ref
reference "right-merging"
\end_inset
) causes fragmentation; the other heuristics proved insufficient to address
this, so the final answer to this was that when we expand the TDB file
inside a transaction commit, we repack the entire tdb.
\end_layout
\begin_layout Standard
The single list lock limits our allocation rate; due to the other issues
this is not currently seen as a bottleneck.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
The first step is to remove all the current heuristics, as they obviously
interact, then examine them once the lock contention is addressed.
\end_layout
\begin_layout Standard
The free list must be split to reduce contention.
Assuming perfect free merging, we can at most have 1 free list entry for
each entry.
This implies that the number of free lists is related to the size of the
hash table, but as it is rare to walk a large number of free list entries
we can use far fewer, say 1/32 of the number of hash buckets.
\end_layout
\begin_layout Standard
It seems tempting to try to reuse the hash implementation which we use for
records here, but we have two ways of searching for free entries: for allocatio
n we search by size (and possibly zone) which produces too many clashes
for our hash table to handle well, and for coalescing we search by address.
Thus an array of doubly-linked free lists seems preferable.
\end_layout
\begin_layout Standard
There are various benefits in using per-size free lists (see
\begin_inset CommandInset ref
LatexCommand ref
reference "sub:TDB-Becomes-Fragmented"
\end_inset
) but it's not clear this would reduce contention in the common case where
all processes are allocating/freeing the same size.
Thus we almost certainly need to divide in other ways: the most obvious
is to divide the file into zones, and using a free list (or table of free
lists) for each.
This approximates address ordering.
\end_layout
\begin_layout Standard
Unfortunately it is difficult to know what heuristics should be used to
determine zone sizes, and our transaction code relies on being able to
create a
\begin_inset Quotes eld
\end_inset
recovery area
\begin_inset Quotes erd
\end_inset
by simply appending to the file (difficult if it would need to create a
new zone header).
Thus we use a linked-list of free tables; currently we only ever create
one, but if there is more than one we choose one at random to use.
In future we may use heuristics to add new free tables on contention.
We only expand the file when all free tables are exhausted.
\end_layout
\begin_layout Standard
The basic algorithm is as follows.
Freeing is simple:
\end_layout
\begin_layout Enumerate
Identify the correct free list.
\end_layout
\begin_layout Enumerate
Lock the corresponding list.
\end_layout
\begin_layout Enumerate
Re-check the list (we didn't have a lock, sizes could have changed): relock
if necessary.
\end_layout
\begin_layout Enumerate
Place the freed entry in the list.
\end_layout
\begin_layout Standard
Allocation is a little more complicated, as we perform delayed coalescing
at this point:
\end_layout
\begin_layout Enumerate
Pick a free table; usually the previous one.
\end_layout
\begin_layout Enumerate
Lock the corresponding list.
\end_layout
\begin_layout Enumerate
If the top entry is large enough, remove it from the list and return it.
\end_layout
\begin_layout Enumerate
Otherwise, coalesce entries in the list.
 If there was no entry large enough, unlock the list and try the next largest
 list.
\end_layout
\begin_layout Enumerate
If no list has an entry which meets our needs, try the next free table.
\end_layout
\begin_layout Enumerate
If no zone satisfies, expand the file.
\end_layout
\begin_layout Standard
This optimizes rapid insert/delete of free list entries by not coalescing
them all the time.
First-fit address ordering seems to be fairly good for keeping
fragmentation low (see
\begin_inset CommandInset ref
LatexCommand ref
reference "sub:TDB-Becomes-Fragmented"
\end_inset
).
Note that address ordering does not need a tailer to coalesce, though if
we needed one we could have one cheaply: see
\begin_inset CommandInset ref
LatexCommand ref
reference "sub:Records-Incur-A"
\end_inset
.
\end_layout
\begin_layout Standard
Each free entry has the free table number in the header: less than 255.
It also contains a doubly-linked list for easy deletion.
\end_layout
\begin_layout Subsection
\begin_inset CommandInset label
LatexCommand label
name "sub:TDB-Becomes-Fragmented"
\end_inset
TDB Becomes Fragmented
\end_layout
\begin_layout Standard
Much of this is a result of allocation strategy
\begin_inset Foot
status collapsed
\begin_layout Plain Layout
The Memory Fragmentation Problem: Solved? Johnstone & Wilson 1995 ftp://ftp.cs.ute
xas.edu/pub/garbage/malloc/ismm98.ps
\end_layout
\end_inset
and deliberate hobbling of coalescing; internal fragmentation (aka overallocati
on) is deliberately set at 25%, and external fragmentation is only cured
by the decision to repack the entire db when a transaction commit needs
to enlarge the file.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
The 25% overhead on allocation works in practice for ldb because indexes
tend to expand by one record at a time.
This internal fragmentation can be resolved by having an
\begin_inset Quotes eld
\end_inset
expanded
\begin_inset Quotes erd
\end_inset
bit in the header to note entries that have previously expanded, and allocating
more space for them.
\end_layout
\begin_layout Standard
There is a spectrum of possible solutions for external fragmentation:
one is to use a fragmentation-avoiding allocation strategy such as best-fit
address-order allocator.
The other end of the spectrum would be to use a bump allocator (very fast
and simple) and simply repack the file when we reach the end.
\end_layout
\begin_layout Standard
There are three problems with efficient fragmentation-avoiding allocators:
they are non-trivial, they tend to use a single free list for each size,
and there's no evidence that tdb allocation patterns will match those recorded
for general allocators (though it seems likely).
\end_layout
\begin_layout Standard
Thus we don't spend too much effort on external fragmentation; we will be
no worse than the current code if we need to repack on occasion.
More effort is spent on reducing freelist contention, and reducing overhead.
\end_layout
\begin_layout Subsection
\begin_inset CommandInset label
LatexCommand label
name "sub:Records-Incur-A"
\end_inset
Records Incur A 28-Byte Overhead
\end_layout
\begin_layout Standard
Each TDB record has a header as follows:
\end_layout
\begin_layout LyX-Code
struct tdb_record {
\end_layout
\begin_layout LyX-Code
tdb_off_t next; /* offset of the next record in the list */
\end_layout
\begin_layout LyX-Code
tdb_len_t rec_len; /* total byte length of record */
\end_layout
\begin_layout LyX-Code
tdb_len_t key_len; /* byte length of key */
\end_layout
\begin_layout LyX-Code
tdb_len_t data_len; /* byte length of data */
\end_layout
\begin_layout LyX-Code
uint32_t full_hash; /* the full 32 bit hash of the key */
\end_layout
\begin_layout LyX-Code
uint32_t magic; /* try to catch errors */
\end_layout
\begin_layout LyX-Code
/* the following union is implied:
\end_layout
\begin_layout LyX-Code
union {
\end_layout
\begin_layout LyX-Code
char record[rec_len];
\end_layout
\begin_layout LyX-Code
struct {
\end_layout
\begin_layout LyX-Code
char key[key_len];
\end_layout
\begin_layout LyX-Code
char data[data_len];
\end_layout
\begin_layout LyX-Code
}
\end_layout
\begin_layout LyX-Code
uint32_t totalsize; (tailer)
\end_layout
\begin_layout LyX-Code
}
\end_layout
\begin_layout LyX-Code
*/
\end_layout
\begin_layout LyX-Code
};
\end_layout
\begin_layout Standard
Naively, this would double to a 56-byte overhead on a 64 bit implementation.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
We can use various techniques to reduce this for an allocated block:
\end_layout
\begin_layout Enumerate
The 'next' pointer is not required, as we are using a flat hash table.
\end_layout
\begin_layout Enumerate
'rec_len' can instead be expressed as an addition to key_len and data_len
(it accounts for wasted or overallocated length in the record).
Since the record length is always a multiple of 8, we can conveniently
fit it in 32 bits (representing up to 35 bits).
\end_layout
\begin_layout Enumerate
'key_len' and 'data_len' can be reduced.
I'm unwilling to restrict 'data_len' to 32 bits, but instead we can combine
the two into one 64-bit field and using a 5 bit value which indicates at
what bit to divide the two.
Keys are unlikely to scale as fast as data, so I'm assuming a maximum key
size of 32 bits.
\end_layout
\begin_layout Enumerate
'full_hash' is used to avoid a memcmp on the
\begin_inset Quotes eld
\end_inset
miss
\begin_inset Quotes erd
\end_inset
case, but this is diminishing returns after a handful of bits (at 10 bits,
it reduces 99.9% of false memcmp).
As an aside, as the lower bits are already incorporated in the hash table
resolution, the upper bits should be used here.
Note that it's not clear that these bits will be a win, given the extra
bits in the hash table itself (see
\begin_inset CommandInset ref
LatexCommand ref
reference "sub:Hash-Size-Solution"
\end_inset
).
\end_layout
\begin_layout Enumerate
'magic' does not need to be enlarged: it currently reflects one of 5 values
(used, free, dead, recovery, and unused_recovery).
It is useful for quick sanity checking however, and should not be eliminated.
\end_layout
\begin_layout Enumerate
'tailer' is only used to coalesce free blocks (so a block to the right can
find the header to check if this block is free).
This can be replaced by a single 'free' bit in the header of the following
block (and the tailer only exists in free blocks).
\begin_inset Foot
status collapsed
\begin_layout Plain Layout
This technique from Thomas Standish.
Data Structure Techniques.
Addison-Wesley, Reading, Massachusetts, 1980.
\end_layout
\end_inset
The current proposed coalescing algorithm doesn't need this, however.
\end_layout
\begin_layout Standard
This produces a 16 byte used header like this:
\end_layout
\begin_layout LyX-Code
struct tdb_used_record {
\end_layout
\begin_layout LyX-Code
uint32_t used_magic : 16,
\end_layout
\begin_layout LyX-Code
\end_layout
\begin_layout LyX-Code
key_data_divide: 5,
\end_layout
\begin_layout LyX-Code
top_hash: 11;
\end_layout
\begin_layout LyX-Code
uint32_t extra_octets;
\end_layout
\begin_layout LyX-Code
uint64_t key_and_data_len;
\end_layout
\begin_layout LyX-Code
};
\end_layout
\begin_layout Standard
And a free record like this:
\end_layout
\begin_layout LyX-Code
struct tdb_free_record {
\end_layout
\begin_layout LyX-Code
uint64_t free_magic: 8,
\end_layout
\begin_layout LyX-Code
prev : 56;
\end_layout
\begin_layout LyX-Code
\end_layout
\begin_layout LyX-Code
uint64_t free_table: 8,
\end_layout
\begin_layout LyX-Code
total_length : 56;
\end_layout
\begin_layout LyX-Code
uint64_t next;
\end_layout
\begin_layout LyX-Code
};
\end_layout
\begin_layout Standard
Note that by limiting valid offsets to 56 bits, we can pack everything we
need into 3 64-bit words, meaning our minimum record size is 8 bytes.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
Transaction Commit Requires 4 fdatasync
\end_layout
\begin_layout Standard
The current transaction algorithm is:
\end_layout
\begin_layout Enumerate
write_recovery_data();
\end_layout
\begin_layout Enumerate
sync();
\end_layout
\begin_layout Enumerate
write_recovery_header();
\end_layout
\begin_layout Enumerate
sync();
\end_layout
\begin_layout Enumerate
overwrite_with_new_data();
\end_layout
\begin_layout Enumerate
sync();
\end_layout
\begin_layout Enumerate
remove_recovery_header();
\end_layout
\begin_layout Enumerate
sync();
\end_layout
\begin_layout Standard
On current ext3, each sync flushes all data to disk, so the next 3 syncs
are relatively expensive.
But this could become a performance bottleneck on other filesystems such
as ext4.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
Neil Brown points out that this is overzealous, and only one sync is needed:
\end_layout
\begin_layout Enumerate
Bundle the recovery data, a transaction counter and a strong checksum of
the new data.
\end_layout
\begin_layout Enumerate
Compute a strong checksum of that whole bundle.
\end_layout
\begin_layout Enumerate
Store the bundle in the database.
\end_layout
\begin_layout Enumerate
Overwrite the oldest of the two recovery pointers in the header (identified
using the transaction counter) with the offset of this bundle.
\end_layout
\begin_layout Enumerate
sync.
\end_layout
\begin_layout Enumerate
Write the new data to the file.
\end_layout
\begin_layout Standard
Checking for recovery means identifying the latest bundle with a valid checksum
and using the new data checksum to ensure that it has been applied.
This is more expensive than the current check, but need only be done at
open.
For running databases, a separate header field can be used to indicate
a transaction in progress; we need only check for recovery if this is set.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Deferred.
\end_layout
\begin_layout Subsection
\begin_inset CommandInset label
LatexCommand label
name "sub:TDB-Does-Not"
\end_inset
TDB Does Not Have Snapshot Support
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
None.
At some point you say
\begin_inset Quotes eld
\end_inset
use a real database
\begin_inset Quotes erd
\end_inset
(but see
\begin_inset CommandInset ref
LatexCommand ref
reference "replay-attribute"
\end_inset
).
\end_layout
\begin_layout Standard
But as a thought experiment, if we implemented transactions to only overwrite
free entries (this is tricky: there must not be a header in each entry
which indicates whether it is free, but use of presence in metadata elsewhere),
and a pointer to the hash table, we could create an entirely new commit
without destroying existing data.
Then it would be easy to implement snapshots in a similar way.
\end_layout
\begin_layout Standard
This would not allow arbitrary changes to the database, such as tdb_repack
does, and would require more space (since we have to preserve the current
and future entries at once).
If we used hash trees rather than one big hash table, we might only have
to rewrite some sections of the hash, too.
\end_layout
\begin_layout Standard
We could then implement snapshots using a similar method, using multiple
different hash tables/free tables.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Deferred.
\end_layout
\begin_layout Subsection
Transactions Cannot Operate in Parallel
\end_layout
\begin_layout Standard
This would be useless for ldb, as it hits the index records with just about
every update.
It would add significant complexity in resolving clashes, and cause the
all transaction callers to write their code to loop in the case where the
transactions spuriously failed.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
None (but see
\begin_inset CommandInset ref
LatexCommand ref
reference "replay-attribute"
\end_inset
).
We could solve a small part of the problem by providing read-only transactions.
These would allow one write transaction to begin, but it could not commit
until all r/o transactions are done.
This would require a new RO_TRANSACTION_LOCK, which would be upgraded on
commit.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Deferred.
\end_layout
\begin_layout Subsection
Default Hash Function Is Suboptimal
\end_layout
\begin_layout Standard
The Knuth-inspired multiplicative hash used by tdb is fairly slow (especially
if we expand it to 64 bits), and works best when the hash bucket size is
a prime number (which also means a slow modulus).
In addition, it is highly predictable which could potentially lead to a
Denial of Service attack in some TDB uses.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
The Jenkins lookup3 hash
\begin_inset Foot
status open
\begin_layout Plain Layout
http://burtleburtle.net/bob/c/lookup3.c
\end_layout
\end_inset
is a fast and superbly-mixing hash.
It's used by the Linux kernel and almost everything else.
This has the particular properties that it takes an initial seed, and produces
two 32 bit hash numbers, which we can combine into a 64-bit hash.
\end_layout
\begin_layout Standard
The seed should be created at tdb-creation time from some random source,
and placed in the header.
This is far from foolproof, but adds a little bit of protection against
hash bombing.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
\begin_inset CommandInset label
LatexCommand label
name "Reliable-Traversal-Adds"
\end_inset
Reliable Traversal Adds Complexity
\end_layout
\begin_layout Standard
We lock a record during traversal iteration, and try to grab that lock in
the delete code.
If that grab on delete fails, we simply mark it deleted and continue onwards;
traversal checks for this condition and does the delete when it moves off
the record.
\end_layout
\begin_layout Standard
If traversal terminates, the dead record may be left indefinitely.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
Remove reliability guarantees; see
\begin_inset CommandInset ref
LatexCommand ref
reference "traverse-Proposed-Solution"
\end_inset
.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Complete.
\end_layout
\begin_layout Subsection
Fcntl Locking Adds Overhead
\end_layout
\begin_layout Standard
Placing a fcntl lock means a system call, as does removing one.
This is actually one reason why transactions can be faster (everything
is locked once at transaction start).
In the uncontended case, this overhead can theoretically be eliminated.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
None.
\end_layout
\begin_layout Standard
We tried this before with spinlock support, in the early days of TDB, and
it didn't make much difference except in manufactured benchmarks.
\end_layout
\begin_layout Standard
We could use spinlocks (with futex kernel support under Linux), but it means
that we lose automatic cleanup when a process dies with a lock.
There is a method of auto-cleanup under Linux, but it's not supported by
other operating systems.
We could reintroduce a clear-if-first-style lock and sweep for dead futexes
on open, but that wouldn't help the normal case of one concurrent opener
dying.
Increasingly elaborate repair schemes could be considered, but they require
an ABI change (everyone must use them) anyway, so there's no need to do
this at the same time as everything else.
\end_layout
\begin_layout Subsection
Some Transactions Don't Require Durability
\end_layout
\begin_layout Standard
Volker points out that gencache uses a CLEAR_IF_FIRST tdb for normal (fast)
usage, and occasionally empties the results into a transactional TDB.
This kind of usage prioritizes performance over durability: as long as
we are consistent, data can be lost.
\end_layout
\begin_layout Standard
This would be more neatly implemented inside tdb: a
\begin_inset Quotes eld
\end_inset
soft
\begin_inset Quotes erd
\end_inset
transaction commit (i.e.
syncless) which meant that data may be reverted on a crash.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\end_layout
\begin_layout Standard
None.
\end_layout
\begin_layout Standard
Unfortunately any transaction scheme which overwrites old data requires
a sync before that overwrite to avoid the possibility of corruption.
\end_layout
\begin_layout Standard
It seems possible to use a scheme similar to that described in
\begin_inset CommandInset ref
LatexCommand ref
reference "sub:TDB-Does-Not"
\end_inset
, where transactions are committed without overwriting existing data, and
an array of top-level pointers were available in the header.
If the transaction is
\begin_inset Quotes eld
\end_inset
soft
\begin_inset Quotes erd
\end_inset
then we would not need a sync at all: existing processes would pick up
the new hash table and free list and work with that.
\end_layout
\begin_layout Standard
At some later point, a sync would allow recovery of the old data into the
free lists (perhaps when the array of top-level pointers filled).
On crash, tdb_open() would examine the array of top levels, and apply the
transactions until it encountered an invalid checksum.
\end_layout
\begin_layout Subsection
Tracing Is Fragile, Replay Is External
\end_layout
\begin_layout Standard
The current TDB has compile-time-enabled tracing code, but it often breaks
as it is not enabled by default.
In a similar way, the ctdb code has an external wrapper which does replay
tracing so it can coordinate cluster-wide transactions.
\end_layout
\begin_layout Subsubsection
Proposed Solution
\begin_inset CommandInset label
LatexCommand label
name "replay-attribute"
\end_inset
\end_layout
\begin_layout Standard
Tridge points out that an attribute can be later added to tdb_open (see
\begin_inset CommandInset ref
LatexCommand ref
reference "attributes"
\end_inset
) to provide replay/trace hooks, which could become the basis for this and
future parallel transactions and snapshot support.
\end_layout
\begin_layout Subsubsection
Status
\end_layout
\begin_layout Standard
Deferred.
\end_layout
\end_body
\end_document
ntdb-1.0/doc/design.pdf 0000664 0000000 0000000 00000566707 12241515307 0015037 0 ustar 00root root 0000000 0000000 %PDF-1.4
%ÐÔÅØ
3 0 obj <<
/Length 2170
/Filter /FlateDecode
>>
stream
xÚZKsã6¾ûWpO¡ªFâE‚Ù\ìI&ëT’ÚÍø’Jr DXbF">ìøß§
ÑÔÅD+ž6€þºûën€²£`DÁ·7Ñß<ïnÞ¾çIÀ&¸VÁÃcÀµf\1×L¦Ið¿„?>|}÷åb)¹²¹mŠMY”\î*—yÑ|¤—ǪÞg-‹†žRÐsU´Í¿Á¿H†ÏE»%a×d¯h
*ý–Íò¥‹×\4~#)HŒ*¶ÕB¤áóB˜0«óÆÍ0PßâS…»b_´HŽŠaÃÙ=nEÝÛul³rcÙb©bL–Âë«í]QÛœÞVµÍ>.t:³Qr¨+25:µò©ÈÑ)t˜•DpuÀ„0mWbL‹x¡Uà[UÛ§¢)ZB>ÑøÝàYMRÊìã5aKUöx9
ÀXHͦ³´íe¼äWâã2~)K#ÈQ)Xª|Vq áQÞ—ÉöF[y˜wëcfCÝè …Ì1*0Á™áé §”L1§d_{(xM&|{ƲÊz©êbS”Ùn÷â'ë¢mmI/+_€8¾-óÚ>Ÿ ä¨H’ÕeðPQØûÃÎöq•NF~Ä$°/os0 ó4 Yâ°i¡Šå n^šÖîi‚RG-òà’×g{?ºýï=íB;P¯ö˜\„«®%øX!ø3¥6,Þãk·k‹3¶³¹]Š&žoe\„zéó¶ØY®\ÚY_8àÀ>Ãèàõír±‚A‡ƒ
¶4ùk¤£¯h‘p‡¡t¹
oÕ#Í¿ƒµœ†-©sã¢\ïºÜ_Æ\ùpûÃÝ-û¤G7®<