bit-babbler-0.9/
bit-babbler-0.9/Makeup/
bit-babbler-0.9/Makeup/Makeup.conf

# makeup configuration data.
#
# Copyright 2003 - 2021, Ron

MAKEUP_VERSION = 0.38

#MAKEUP_VERBOSE = yes

MAKEUP_DIR                   = $(top_srcdir)/Makeup
MAKEUP_CONFIG_DIR            = $(MAKEUP_DIR)/config
MAKEUP_CONF_M4_DIR           = $(MAKEUP_CONFIG_DIR)/m4
MAKEUP_AC_DIR                = $(MAKEUP_DIR)/ac-fragments
MAKEUP_AC_AUX_DIR            = $(MAKEUP_DIR)/ac-aux
MAKEUP_GMAKE_DIR             = $(MAKEUP_DIR)/gmake-fragments
MAKEUP_SWIG_DIR              = $(MAKEUP_DIR)/swig
MAKEUP_SWIG_IF_DIR           = $(MAKEUP_SWIG_DIR)/interfaces
MAKEUP_SWIG_WRAP_DIR         = $(MAKEUP_SWIG_DIR)/wrappers
MAKEUP_DOXYGEN_DIR           = $(MAKEUP_DIR)/doxygen
MAKEUP_DOXYGEN_THEME_DIR     = $(MAKEUP_DOXYGEN_DIR)/themes
MAKEUP_STAMP_DIR             = $(MAKEUP_DIR)/stamp
MAKEUP_TEST_DIR              = $(MAKEUP_DIR)/test

SYS_MAKEUP_DIR               = /usr/share/makeup
SYS_MAKEUP_AC_DIR            = $(SYS_MAKEUP_DIR)/ac-fragments
SYS_MAKEUP_GMAKE_DIR         = $(SYS_MAKEUP_DIR)/gmake-fragments
SYS_MAKEUP_DOXYGEN_DIR       = $(SYS_MAKEUP_DIR)/doxygen
SYS_MAKEUP_DOXYGEN_THEME_DIR = $(SYS_MAKEUP_DOXYGEN_DIR)/themes

MAKEUP_TARGET_TYPES          = EXECUTABLE LIBRARY PLUGIN DATA SWIG KBUILD
MAKEUP_LINK_LANGUAGES        = C C++

PROJECT_DIRS                 = doc include src

PACKAGE_CONF                 = $(MAKEUP_CONFIG_DIR)/Package.conf

bit-babbler-0.9/Makeup/ac-aux/
bit-babbler-0.9/Makeup/ac-aux/config.guess

#! /bin/sh
# Attempt to guess a canonical system name.
#   Copyright 1992-2018 Free Software Foundation, Inc.

timestamp='2018-02-24'

# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://www.gnu.org/licenses/>.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that
# program.  This Exception is an additional permission under section 7
# of the GNU General Public License, version 3 ("GPLv3").
#
# Originally written by Per Bothner; maintained since 2000 by Ben Elliston.
#
# You can get the latest version of this script from:
# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
#
# Please send patches to <config-patches@gnu.org>.

me=`echo "$0" | sed -e 's,.*/,,'`

usage="\
Usage: $0 [OPTION]

Output the configuration name of the system \`$me' is run on.

Options:
  -h, --help         print this help, then exit
  -t, --time-stamp   print date of last modification, then exit
  -v, --version      print version number, then exit

Report bugs and patches to <config-patches@gnu.org>."

version="\
GNU config.guess ($timestamp)

Originally written by Per Bothner.
Copyright 1992-2018 Free Software Foundation, Inc.

This is free software; see the source for copying conditions.  There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."

help="
Try \`$me --help' for more information."
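# Example (illustrative sketch, not part of the upstream script): a configure
# script typically captures this helper's output to determine the build
# triplet.  The path below assumes this vendored copy under Makeup/ac-aux/.
#
#   build=`./Makeup/ac-aux/config.guess` || exit 1
#
# On a 64-bit PC running a glibc-based GNU/Linux this prints something like
#   x86_64-pc-linux-gnu
# while the same machine with musl libc would print x86_64-pc-linux-musl.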
# Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" >&2 exit 1 ;; * ) break ;; esac done if test $# != 0; then echo "$me: too many arguments$help" >&2 exit 1 fi trap 'exit 1' 1 2 15 # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a # headache to deal with in a portable fashion. # Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still # use `HOST_CC' if defined, but it is deprecated. # Portable tmp directory creation inspired by the Autoconf team. set_cc_for_build=' trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; : ${TMPDIR=/tmp} ; { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; dummy=$tmp/dummy ; tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; case $CC_FOR_BUILD,$HOST_CC,$CC in ,,) echo "int x;" > "$dummy.c" ; for c in cc gcc c89 c99 ; do if ($c -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then CC_FOR_BUILD="$c"; break ; fi ; done ; if test x"$CC_FOR_BUILD" = x ; then CC_FOR_BUILD=no_compiler_found ; fi ;; ,,*) CC_FOR_BUILD=$CC ;; ,*,*) CC_FOR_BUILD=$HOST_CC ;; esac ; set_cc_for_build= ;' # This is needed to find uname on a Pyramid OSx when run in the BSD universe. # (ghazi@noc.rutgers.edu 1994-08-24) if (test -f /.attbin/uname) >/dev/null 2>&1 ; then PATH=$PATH:/.attbin ; export PATH fi UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown case "$UNAME_SYSTEM" in Linux|GNU|GNU/*) # If the system lacks a compiler, then just pick glibc. # We could probably try harder. LIBC=gnu eval "$set_cc_for_build" cat <<-EOF > "$dummy.c" #include #if defined(__UCLIBC__) LIBC=uclibc #elif defined(__dietlibc__) LIBC=dietlibc #else LIBC=gnu #endif EOF eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`" # If ldd exists, use it to detect musl libc. if command -v ldd >/dev/null && \ ldd --version 2>&1 | grep -q ^musl then LIBC=musl fi ;; esac # Note: order is significant - the case branches are not exclusive. case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently # switched to ELF, *-*-netbsd* would select the old # object file format. This provides both forward # compatibility and a consistent mechanism for selecting the # object file format. # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". 
sysctl="sysctl -n hw.machine_arch" UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ "/sbin/$sysctl" 2>/dev/null || \ "/usr/sbin/$sysctl" 2>/dev/null || \ echo unknown)` case "$UNAME_MACHINE_ARCH" in armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; earmv*) arch=`echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,'` endian=`echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p'` machine="${arch}${endian}"-unknown ;; *) machine="$UNAME_MACHINE_ARCH"-unknown ;; esac # The Operating System including object format, if it has switched # to ELF recently (or will in the future) and ABI. case "$UNAME_MACHINE_ARCH" in earm*) os=netbsdelf ;; arm*|i386|m68k|ns32k|sh3*|sparc|vax) eval "$set_cc_for_build" if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). # Return netbsd for either. FIX? os=netbsd else os=netbsdelf fi ;; *) os=netbsd ;; esac # Determine ABI tags. case "$UNAME_MACHINE_ARCH" in earm*) expr='s/^earmv[0-9]/-eabi/;s/eb$//' abi=`echo "$UNAME_MACHINE_ARCH" | sed -e "$expr"` ;; esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. case "$UNAME_VERSION" in Debian*) release='-gnu' ;; *) release=`echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2` ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. echo "$machine-${os}${release}${abi}" exit ;; *:Bitrig:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE" exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE" exit ;; *:LibertyBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE" exit ;; *:MidnightBSD:*:*) echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE" exit ;; *:ekkoBSD:*:*) echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE" exit ;; *:SolidBSD:*:*) echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE" exit ;; macppc:MirBSD:*:*) echo powerpc-unknown-mirbsd"$UNAME_RELEASE" exit ;; *:MirBSD:*:*) echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE" exit ;; *:Sortix:*:*) echo "$UNAME_MACHINE"-unknown-sortix exit ;; *:Redox:*:*) echo "$UNAME_MACHINE"-unknown-redox exit ;; mips:OSF1:*.*) echo mips-dec-osf1 exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` ;; *5.*) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` ;; esac # According to Compaq, /usr/sbin/psrinfo has been available on # OSF/1 and Tru64 systems produced since 1995. I hope that # covers most systems running today. This code pipes the CPU # types through head -n 1, so we only detect the type of CPU 0. 
ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` case "$ALPHA_CPU_TYPE" in "EV4 (21064)") UNAME_MACHINE=alpha ;; "EV4.5 (21064)") UNAME_MACHINE=alpha ;; "LCA4 (21066/21068)") UNAME_MACHINE=alpha ;; "EV5 (21164)") UNAME_MACHINE=alphaev5 ;; "EV5.6 (21164A)") UNAME_MACHINE=alphaev56 ;; "EV5.6 (21164PC)") UNAME_MACHINE=alphapca56 ;; "EV5.7 (21164PC)") UNAME_MACHINE=alphapca57 ;; "EV6 (21264)") UNAME_MACHINE=alphaev6 ;; "EV6.7 (21264A)") UNAME_MACHINE=alphaev67 ;; "EV6.8CB (21264C)") UNAME_MACHINE=alphaev68 ;; "EV6.8AL (21264B)") UNAME_MACHINE=alphaev68 ;; "EV6.8CX (21264D)") UNAME_MACHINE=alphaev68 ;; "EV6.9A (21264/EV69A)") UNAME_MACHINE=alphaev69 ;; "EV7 (21364)") UNAME_MACHINE=alphaev7 ;; "EV7.9 (21364A)") UNAME_MACHINE=alphaev79 ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. echo "$UNAME_MACHINE"-dec-osf"`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`" # Reset EXIT trap before exiting to avoid spurious non-zero exit code. exitcode=$? trap '' 0 exit $exitcode ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) echo "$UNAME_MACHINE"-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) echo "$UNAME_MACHINE"-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition exit ;; *:z/VM:*:*) echo s390-ibm-zvmoe exit ;; *:OS400:*:*) echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) echo arm-acorn-riscix"$UNAME_RELEASE" exit ;; arm*:riscos:*:*|arm*:RISCOS:*:*) echo arm-unknown-riscos exit ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) echo hppa1.1-hitachi-hiuxmpp exit ;; Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. if test "`(/bin/universe) 2>/dev/null`" = att ; then echo pyramid-pyramid-sysv3 else echo pyramid-pyramid-bsd fi exit ;; NILE*:*:*:dcosx) echo pyramid-pyramid-svr4 exit ;; DRS?6000:unix:4.0:6*) echo sparc-icl-nx6 exit ;; DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) case `/usr/bin/uname -p` in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) echo "$UNAME_MACHINE"-ibm-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" exit ;; sun4H:SunOS:5.*:*) echo sparc-hal-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) echo sparc-sun-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) echo i386-pc-auroraux"$UNAME_RELEASE" exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) eval "$set_cc_for_build" SUN_ARCH=i386 # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. if [ "$CC_FOR_BUILD" != no_compiler_found ]; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then SUN_ARCH=x86_64 fi fi echo "$SUN_ARCH"-pc-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. 
echo sparc-sun-solaris3"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; sun4*:SunOS:*:*) case "`/usr/bin/arch -k`" in Series*|S4*) UNAME_RELEASE=`uname -v` ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. echo sparc-sun-sunos"`echo "$UNAME_RELEASE"|sed -e 's/-/_/'`" exit ;; sun3*:SunOS:*:*) echo m68k-sun-sunos"$UNAME_RELEASE" exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3 case "`/bin/arch`" in sun3) echo m68k-sun-sunos"$UNAME_RELEASE" ;; sun4) echo sparc-sun-sunos"$UNAME_RELEASE" ;; esac exit ;; aushp:SunOS:*:*) echo sparc-auspex-sunos"$UNAME_RELEASE" exit ;; # The situation for MiNT is a little confusing. The machine name # can be virtually everything (everything which is not # "atarist" or "atariste" at least should have a processor # > m68000). The system name ranges from "MiNT" over "FreeMiNT" # to the lowercase version "mint" (or "freemint"). Finally # the system name "TOS" denotes a system which is actually not # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint"$UNAME_RELEASE" exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint"$UNAME_RELEASE" exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) echo m68k-atari-mint"$UNAME_RELEASE" exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) echo m68k-milan-mint"$UNAME_RELEASE" exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) echo m68k-hades-mint"$UNAME_RELEASE" exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) echo m68k-unknown-mint"$UNAME_RELEASE" exit ;; m68k:machten:*:*) echo m68k-apple-machten"$UNAME_RELEASE" exit ;; powerpc:machten:*:*) echo powerpc-apple-machten"$UNAME_RELEASE" exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) echo mips-dec-ultrix"$UNAME_RELEASE" exit ;; VAX*:ULTRIX*:*:*) echo vax-dec-ultrix"$UNAME_RELEASE" exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) echo clipper-intergraph-clix"$UNAME_RELEASE" exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) eval "$set_cc_for_build" sed 's/^ //' << EOF > "$dummy.c" #ifdef __cplusplus #include /* for printf() prototype */ int main (int argc, char *argv[]) { #else int main (argc, argv) int argc; char *argv[]; { #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF $CC_FOR_BUILD -o "$dummy" "$dummy.c" && dummyarg=`echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p'` && SYSTEM_NAME=`"$dummy" "$dummyarg"` && { echo "$SYSTEM_NAME"; exit; } echo mips-mips-riscos"$UNAME_RELEASE" exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax exit ;; Motorola:*:4.3:PL8-*) echo powerpc-harris-powermax exit ;; Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) echo powerpc-harris-powermax exit ;; Night_Hawk:Power_UNIX:*:*) echo powerpc-harris-powerunix exit ;; m88k:CX/UX:7*:*) echo m88k-harris-cxux7 exit ;; m88k:*:4*:R4*) echo m88k-motorola-sysv4 exit ;; m88k:*:3*:R3*) echo m88k-motorola-sysv3 exit ;; AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` if [ "$UNAME_PROCESSOR" = mc88100 ] || [ 
"$UNAME_PROCESSOR" = mc88110 ] then if [ "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx ] || \ [ "$TARGET_BINARY_INTERFACE"x = x ] then echo m88k-dg-dgux"$UNAME_RELEASE" else echo m88k-dg-dguxbcs"$UNAME_RELEASE" fi else echo i586-dg-dgux"$UNAME_RELEASE" fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) echo m88k-dolphin-sysv3 exit ;; M88*:*:R3*:*) # Delta 88k system running SVR3 echo m88k-motorola-sysv3 exit ;; XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) echo m88k-tektronix-sysv3 exit ;; Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) echo mips-sgi-irix"`echo "$UNAME_RELEASE"|sed -e 's/-/_/g'`" exit ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' i*86:AIX:*:*) echo i386-ibm-aix exit ;; ia64:AIX:*:*) if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" fi echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV" exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then eval "$set_cc_for_build" sed 's/^ //' << EOF > "$dummy.c" #include main() { if (!__power_pc()) exit(1); puts("powerpc-ibm-aix3.2.5"); exit(0); } EOF if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` then echo "$SYSTEM_NAME" else echo rs6000-ibm-aix3.2.5 fi elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then echo rs6000-ibm-aix3.2.4 else echo rs6000-ibm-aix3.2 fi exit ;; *:AIX:*:[4567]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc fi if [ -x /usr/bin/lslpp ] ; then IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` else IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" fi echo "$IBM_ARCH"-ibm-aix"$IBM_REV" exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx exit ;; DPX/2?00:B.O.S.:*:*) echo m68k-bull-sysv3 exit ;; 9000/[34]??:4.3bsd:1.*:*) echo m68k-hp-bsd exit ;; hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) echo m68k-hp-bsd4.4 exit ;; 9000/[34678]??:HP-UX:*:*) HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` case "$UNAME_MACHINE" in 9000/31?) HP_ARCH=m68000 ;; 9000/[34]??) 
HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) if [ -x /usr/bin/getconf ]; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` case "$sc_cpu_version" in 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 case "$sc_kernel_bits" in 32) HP_ARCH=hppa2.0n ;; 64) HP_ARCH=hppa2.0w ;; '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 esac ;; esac fi if [ "$HP_ARCH" = "" ]; then eval "$set_cc_for_build" sed 's/^ //' << EOF > "$dummy.c" #define _HPUX_SOURCE #include #include int main () { #if defined(_SC_KERNEL_BITS) long bits = sysconf(_SC_KERNEL_BITS); #endif long cpu = sysconf (_SC_CPU_VERSION); switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0"); break; case CPU_PA_RISC1_1: puts ("hppa1.1"); break; case CPU_PA_RISC2_0: #if defined(_SC_KERNEL_BITS) switch (bits) { case 64: puts ("hppa2.0w"); break; case 32: puts ("hppa2.0n"); break; default: puts ("hppa2.0"); break; } break; #else /* !defined(_SC_KERNEL_BITS) */ puts ("hppa2.0"); break; #endif default: puts ("hppa1.0"); break; } exit (0); } EOF (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=`"$dummy"` test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac if [ "$HP_ARCH" = hppa2.0w ] then eval "$set_cc_for_build" # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler # generating 64-bit code. GNU and HP use different nomenclature: # # $ CC_FOR_BUILD=cc ./config.guess # => hppa2.0w-hp-hpux11.23 # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then HP_ARCH=hppa2.0w else HP_ARCH=hppa64 fi fi echo "$HP_ARCH"-hp-hpux"$HPUX_REV" exit ;; ia64:HP-UX:*:*) HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` echo ia64-hp-hpux"$HPUX_REV" exit ;; 3050*:HI-UX:*:*) eval "$set_cc_for_build" sed 's/^ //' << EOF > "$dummy.c" #include int main () { long cpu = sysconf (_SC_CPU_VERSION); /* The order matters, because CPU_IS_HP_MC68K erroneously returns true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct results, however. 
*/ if (CPU_IS_PA_RISC (cpu)) { switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; default: puts ("hppa-hitachi-hiuxwe2"); break; } } else if (CPU_IS_HP_MC68K (cpu)) puts ("m68k-hitachi-hiuxwe2"); else puts ("unknown-hitachi-hiuxwe2"); exit (0); } EOF $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) echo hppa1.0-hp-bsd exit ;; *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) echo hppa1.0-hp-osf exit ;; i*86:OSF1:*:*) if [ -x /usr/sbin/sysversion ] ; then echo "$UNAME_MACHINE"-unknown-osf1mk else echo "$UNAME_MACHINE"-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) echo hppa1.1-hp-lites exit ;; C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) echo c1-convex-bsd exit ;; C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) echo c34-convex-bsd exit ;; C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) echo c38-convex-bsd exit ;; C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo "$UNAME_RELEASE" | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE" exit ;; sparc*:BSD/OS:*:*) echo sparc-unknown-bsdi"$UNAME_RELEASE" exit ;; *:BSD/OS:*:*) echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE" exit ;; *:FreeBSD:*:*) UNAME_PROCESSOR=`/usr/bin/uname -p` case "$UNAME_PROCESSOR" in amd64) UNAME_PROCESSOR=x86_64 ;; i386) UNAME_PROCESSOR=i586 ;; esac echo "$UNAME_PROCESSOR"-unknown-freebsd"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" exit ;; i*:CYGWIN*:*) echo "$UNAME_MACHINE"-pc-cygwin exit ;; *:MINGW64*:*) echo "$UNAME_MACHINE"-pc-mingw64 exit ;; *:MINGW*:*) echo "$UNAME_MACHINE"-pc-mingw32 exit ;; *:MSYS*:*) echo "$UNAME_MACHINE"-pc-msys exit ;; i*:PW*:*) echo "$UNAME_MACHINE"-pc-pw32 exit ;; *:Interix*:*) case "$UNAME_MACHINE" in x86) echo 
i586-pc-interix"$UNAME_RELEASE" exit ;; authenticamd | genuineintel | EM64T) echo x86_64-unknown-interix"$UNAME_RELEASE" exit ;; IA64) echo ia64-unknown-interix"$UNAME_RELEASE" exit ;; esac ;; i*:UWIN*:*) echo "$UNAME_MACHINE"-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) echo x86_64-unknown-cygwin exit ;; prep*:SunOS:5.*:*) echo powerpcle-unknown-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; *:GNU:*:*) # the GNU system echo "`echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,'`-unknown-$LIBC`echo "$UNAME_RELEASE"|sed -e 's,/.*$,,'`" exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland echo "$UNAME_MACHINE-unknown-`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`-$LIBC" exit ;; i*86:Minix:*:*) echo "$UNAME_MACHINE"-pc-minix exit ;; aarch64:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; alpha:Linux:*:*) case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in EV5) UNAME_MACHINE=alphaev5 ;; EV56) UNAME_MACHINE=alphaev56 ;; PCA56) UNAME_MACHINE=alphapca56 ;; PCA57) UNAME_MACHINE=alphapca56 ;; EV6) UNAME_MACHINE=alphaev6 ;; EV67) UNAME_MACHINE=alphaev67 ;; EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 if test "$?" = 0 ; then LIBC=gnulibc1 ; fi echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; arc:Linux:*:* | arceb:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; arm*:Linux:*:*) eval "$set_cc_for_build" if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi else echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf fi fi exit ;; avr32*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; cris:Linux:*:*) echo "$UNAME_MACHINE"-axis-linux-"$LIBC" exit ;; crisv32:Linux:*:*) echo "$UNAME_MACHINE"-axis-linux-"$LIBC" exit ;; e2k:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; frv:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; hexagon:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; i*86:Linux:*:*) echo "$UNAME_MACHINE"-pc-linux-"$LIBC" exit ;; ia64:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; k1om:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; m32r*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; m68*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; mips:Linux:*:* | mips64:Linux:*:*) eval "$set_cc_for_build" sed 's/^ //' << EOF > "$dummy.c" #undef CPU #undef ${UNAME_MACHINE} #undef ${UNAME_MACHINE}el #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) CPU=${UNAME_MACHINE}el #else #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) CPU=${UNAME_MACHINE} #else CPU= #endif #endif EOF eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU'`" test "x$CPU" != x && { echo "$CPU-unknown-linux-$LIBC"; exit; } ;; mips64el:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; openrisc*:Linux:*:*) echo or1k-unknown-linux-"$LIBC" exit ;; or32:Linux:*:* | or1k*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; padre:Linux:*:*) echo sparc-unknown-linux-"$LIBC" exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) echo 
hppa64-unknown-linux-"$LIBC" exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;; PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;; *) echo hppa-unknown-linux-"$LIBC" ;; esac exit ;; ppc64:Linux:*:*) echo powerpc64-unknown-linux-"$LIBC" exit ;; ppc:Linux:*:*) echo powerpc-unknown-linux-"$LIBC" exit ;; ppc64le:Linux:*:*) echo powerpc64le-unknown-linux-"$LIBC" exit ;; ppcle:Linux:*:*) echo powerpcle-unknown-linux-"$LIBC" exit ;; riscv32:Linux:*:* | riscv64:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; s390:Linux:*:* | s390x:Linux:*:*) echo "$UNAME_MACHINE"-ibm-linux-"$LIBC" exit ;; sh64*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; sh*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; tile*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; vax:Linux:*:*) echo "$UNAME_MACHINE"-dec-linux-"$LIBC" exit ;; x86_64:Linux:*:*) if objdump -f /bin/sh | grep -q elf32-x86-64; then echo "$UNAME_MACHINE"-pc-linux-"$LIBC"x32 else echo "$UNAME_MACHINE"-pc-linux-"$LIBC" fi exit ;; xtensa*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. # earlier versions are messed up and put the nodename in both # sysname and nodename. echo i386-sequent-sysv4 exit ;; i*86:UNIX_SV:4.2MP:2.*) # Unixware is an offshoot of SVR4, but it has its own version # number series starting with 2... # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION" exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. echo "$UNAME_MACHINE"-pc-os2-emx exit ;; i*86:XTS-300:*:STOP) echo "$UNAME_MACHINE"-unknown-stop exit ;; i*86:atheos:*:*) echo "$UNAME_MACHINE"-unknown-atheos exit ;; i*86:syllable:*:*) echo "$UNAME_MACHINE"-pc-syllable exit ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) echo i386-unknown-lynxos"$UNAME_RELEASE" exit ;; i*86:*DOS:*:*) echo "$UNAME_MACHINE"-pc-msdosdjgpp exit ;; i*86:*:4.*:*) UNAME_REL=`echo "$UNAME_RELEASE" | sed 's/\/MP$//'` if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL" else echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL" fi exit ;; i*86:*:5:[678]*) # UnixWare 7.x, OpenUNIX and OpenServer 6. 
case `/bin/uname -X | grep "^Machine"` in *486*) UNAME_MACHINE=i486 ;; *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}{$UNAME_VERSION}" exit ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ && UNAME_MACHINE=i586 (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL" else echo "$UNAME_MACHINE"-pc-sysv32 fi exit ;; pc:*:*:*) # Left here for compatibility: # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; Intel:Mach:3*:*) echo i386-pc-mach3 exit ;; paragon:*:*:*) echo i860-intel-osf1 exit ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered. echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) # "miniframe" echo m68010-convergent-sysv exit ;; mc68k:UNIX:SYSTEM5:3.51m) echo m68k-convergent-sysv exit ;; M680?0:D-NIX:5.3:*) echo m68k-diab-dnix exit ;; M68*:*:R3V[5678]*:*) test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) OS_REL='' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; NCR*:*:4.2:* | MPRAS*:*:4.2:*) OS_REL='.3' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) echo m68k-unknown-lynxos"$UNAME_RELEASE" exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) echo sparc-unknown-lynxos"$UNAME_RELEASE" exit ;; rs6000:LynxOS:2.*:*) echo rs6000-unknown-lynxos"$UNAME_RELEASE" exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) echo powerpc-unknown-lynxos"$UNAME_RELEASE" exit ;; SM[BE]S:UNIX_SV:*:*) echo mips-dde-sysv"$UNAME_RELEASE" exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 exit ;; RM*:SINIX-*:*:*) echo mips-sni-sysv4 exit ;; *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` echo "$UNAME_MACHINE"-sni-sysv4 
else echo ns32k-sni-sysv fi exit ;; PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort # says echo i586-unisys-sysv4 exit ;; *:UNIX_System_V:4*:FTX*) # From Gerald Hewes . # How about differentiating between stratus architectures? -djm echo hppa1.1-stratus-sysv4 exit ;; *:*:*:FTX*) # From seanf@swdc.stratus.com. echo i860-stratus-sysv4 exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. echo "$UNAME_MACHINE"-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) echo m68k-apple-aux"$UNAME_RELEASE" exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) if [ -d /usr/nec ]; then echo mips-nec-sysv"$UNAME_RELEASE" else echo mips-unknown-sysv"$UNAME_RELEASE" fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. echo powerpc-be-beos exit ;; BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. echo powerpc-apple-beos exit ;; BePC:BeOS:*:*) # BeOS running on Intel PC compatible. echo i586-pc-beos exit ;; BePC:Haiku:*:*) # Haiku running on Intel PC compatible. echo i586-pc-haiku exit ;; x86_64:Haiku:*:*) echo x86_64-unknown-haiku exit ;; SX-4:SUPER-UX:*:*) echo sx4-nec-superux"$UNAME_RELEASE" exit ;; SX-5:SUPER-UX:*:*) echo sx5-nec-superux"$UNAME_RELEASE" exit ;; SX-6:SUPER-UX:*:*) echo sx6-nec-superux"$UNAME_RELEASE" exit ;; SX-7:SUPER-UX:*:*) echo sx7-nec-superux"$UNAME_RELEASE" exit ;; SX-8:SUPER-UX:*:*) echo sx8-nec-superux"$UNAME_RELEASE" exit ;; SX-8R:SUPER-UX:*:*) echo sx8r-nec-superux"$UNAME_RELEASE" exit ;; SX-ACE:SUPER-UX:*:*) echo sxace-nec-superux"$UNAME_RELEASE" exit ;; Power*:Rhapsody:*:*) echo powerpc-apple-rhapsody"$UNAME_RELEASE" exit ;; *:Rhapsody:*:*) echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE" exit ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown eval "$set_cc_for_build" if test "$UNAME_PROCESSOR" = unknown ; then UNAME_PROCESSOR=powerpc fi if test "`echo "$UNAME_RELEASE" | sed -e 's/\..*//'`" -le 10 ; then if [ "$CC_FOR_BUILD" != no_compiler_found ]; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then case $UNAME_PROCESSOR in i386) UNAME_PROCESSOR=x86_64 ;; powerpc) UNAME_PROCESSOR=powerpc64 ;; esac fi # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_PPC >/dev/null then UNAME_PROCESSOR=powerpc fi fi elif test "$UNAME_PROCESSOR" = i386 ; then # Avoid executing cc on OS X 10.9, as it ships with a stub # that puts up a graphical alert prompting to install # developer tools. Any system running Mac OS X 10.7 or # later (Darwin 11 and later) is required to have a 64-bit # processor. This is not true of the ARM version of Darwin # that Apple uses in portable devices. 
UNAME_PROCESSOR=x86_64 fi echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE" exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` if test "$UNAME_PROCESSOR" = x86; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE" exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; NEO-*:NONSTOP_KERNEL:*:*) echo neo-tandem-nsk"$UNAME_RELEASE" exit ;; NSE-*:NONSTOP_KERNEL:*:*) echo nse-tandem-nsk"$UNAME_RELEASE" exit ;; NSR-*:NONSTOP_KERNEL:*:*) echo nsr-tandem-nsk"$UNAME_RELEASE" exit ;; NSV-*:NONSTOP_KERNEL:*:*) echo nsv-tandem-nsk"$UNAME_RELEASE" exit ;; NSX-*:NONSTOP_KERNEL:*:*) echo nsx-tandem-nsk"$UNAME_RELEASE" exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux exit ;; BS2000:POSIX*:*:*) echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE" exit ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. if test "$cputype" = 386; then UNAME_MACHINE=i386 else UNAME_MACHINE="$cputype" fi echo "$UNAME_MACHINE"-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 exit ;; *:TENEX:*:*) echo pdp10-unknown-tenex exit ;; KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) echo pdp10-dec-tops20 exit ;; XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) echo pdp10-xkl-tops20 exit ;; *:TOPS-20:*:*) echo pdp10-unknown-tops20 exit ;; *:ITS:*:*) echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) echo mips-sei-seiux"$UNAME_RELEASE" exit ;; *:DragonFly:*:*) echo "$UNAME_MACHINE"-unknown-dragonfly"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" exit ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` case "$UNAME_MACHINE" in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; esac ;; *:XENIX:*:SysV) echo i386-pc-xenix exit ;; i*86:skyos:*:*) echo "$UNAME_MACHINE"-pc-skyos"`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'`" exit ;; i*86:rdos:*:*) echo "$UNAME_MACHINE"-pc-rdos exit ;; i*86:AROS:*:*) echo "$UNAME_MACHINE"-pc-aros exit ;; x86_64:VMkernel:*:*) echo "$UNAME_MACHINE"-unknown-esx exit ;; amd64:Isilon\ OneFS:*:*) echo x86_64-unknown-onefs exit ;; esac echo "$0: unable to guess system type" >&2 case "$UNAME_MACHINE:$UNAME_SYSTEM" in mips:Linux | mips64:Linux) # If we got here on MIPS GNU/Linux, output extra information. cat >&2 <&2 </dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` /bin/uname -X = `(/bin/uname -X) 2>/dev/null` hostinfo = `(hostinfo) 2>/dev/null` /bin/universe = `(/bin/universe) 2>/dev/null` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` /bin/arch = `(/bin/arch) 2>/dev/null` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` UNAME_MACHINE = "$UNAME_MACHINE" UNAME_RELEASE = "$UNAME_RELEASE" UNAME_SYSTEM = "$UNAME_SYSTEM" UNAME_VERSION = "$UNAME_VERSION" EOF exit 1 # Local variables: # eval: (add-hook 'write-file-functions 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: bit-babbler-0.9/Makeup/ac-aux/config.rpath0000755000000000000000000004421614136173163015341 0ustar #! /bin/sh # Output a system dependent set of variables, describing how to set the # run time search path of shared libraries in an executable. 
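# Illustrative usage (a sketch based on the calling convention described
# below, not part of the upstream script): the caller passes the canonical
# host triplet as the first argument, exports CC, GCC, LDFLAGS, LD and
# with_gnu_ld, and then evals the variable assignments printed on stdout:
#
#   CC=gcc GCC=yes LDFLAGS= LD=ld with_gnu_ld=yes
#   export CC GCC LDFLAGS LD with_gnu_ld
#   eval "`./Makeup/ac-aux/config.rpath x86_64-pc-linux-gnu`"
#
# After that, variables such as acl_cv_hardcode_libdir_flag_spec are set in
# the caller's shell (the acl_cv_ prefix is added by the sed at the end of
# this script).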
# # Copyright 1996-2020 Free Software Foundation, Inc. # Taken from GNU libtool, 2001 # Originally by Gordon Matzigkeit , 1996 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # # The first argument passed to this file is the canonical host specification, # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM # or # CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM # The environment variables CC, GCC, LDFLAGS, LD, with_gnu_ld # should be set by the caller. # # The set of defined variables is at the end of this script. # Known limitations: # - On IRIX 6.5 with CC="cc", the run time search patch must not be longer # than 256 bytes, otherwise the compiler driver will dump core. The only # known workaround is to choose shorter directory names for the build # directory and/or the installation directory. # All known linkers require a '.a' archive for static linking (except MSVC, # which needs '.lib'). libext=a shrext=.so host="$1" host_cpu=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'` host_vendor=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'` host_os=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'` # Code taken from libtool.m4's _LT_CC_BASENAME. for cc_temp in $CC""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done cc_basename=`echo "$cc_temp" | sed -e 's%^.*/%%'` # Code taken from libtool.m4's _LT_COMPILER_PIC. wl= if test "$GCC" = yes; then wl='-Wl,' else case "$host_os" in aix*) wl='-Wl,' ;; mingw* | cygwin* | pw32* | os2* | cegcc*) ;; hpux9* | hpux10* | hpux11*) wl='-Wl,' ;; irix5* | irix6* | nonstopux*) wl='-Wl,' ;; linux* | k*bsd*-gnu | kopensolaris*-gnu) case $cc_basename in ecc*) wl='-Wl,' ;; icc* | ifort*) wl='-Wl,' ;; lf95*) wl='-Wl,' ;; nagfor*) wl='-Wl,-Wl,,' ;; pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) wl='-Wl,' ;; ccc*) wl='-Wl,' ;; xl* | bgxl* | bgf* | mpixl*) wl='-Wl,' ;; como) wl='-lopt=' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ F* | *Sun*Fortran*) wl= ;; *Sun\ C*) wl='-Wl,' ;; esac ;; esac ;; newsos6) ;; *nto* | *qnx*) ;; osf3* | osf4* | osf5*) wl='-Wl,' ;; rdos*) ;; solaris*) case $cc_basename in f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) wl='-Qoption ld ' ;; *) wl='-Wl,' ;; esac ;; sunos4*) wl='-Qoption ld ' ;; sysv4 | sysv4.2uw2* | sysv4.3*) wl='-Wl,' ;; sysv4*MP*) ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) wl='-Wl,' ;; unicos*) wl='-Wl,' ;; uts4*) ;; esac fi # Code taken from libtool.m4's _LT_LINKER_SHLIBS. hardcode_libdir_flag_spec= hardcode_libdir_separator= hardcode_direct=no hardcode_minus_L=no case "$host_os" in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test "$GCC" != yes; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd*) with_gnu_ld=no ;; esac ld_shlibs=yes if test "$with_gnu_ld" = yes; then # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. # Unlike libtool, we use -rpath here, not --rpath, since the documented # option of GNU ld is called -rpath, not --rpath. 
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' case "$host_os" in aix[3-9]*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then ld_shlibs=no fi ;; amigaos*) case "$host_cpu" in powerpc) ;; m68k) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; beos*) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. hardcode_libdir_flag_spec='-L$libdir' if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then : else ld_shlibs=no fi ;; haiku*) ;; interix[3-9]*) hardcode_direct=no hardcode_libdir_flag_spec='${wl}-rpath,$libdir' ;; gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; netbsd*) ;; solaris*) if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then ld_shlibs=no elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) ld_shlibs=no ;; *) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' else ld_shlibs=no fi ;; esac ;; sunos4*) hardcode_direct=yes ;; *) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; esac if test "$ld_shlibs" = no; then hardcode_libdir_flag_spec= fi else case "$host_os" in aix3*) # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. hardcode_minus_L=yes if test "$GCC" = yes; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. hardcode_direct=unsupported fi ;; aix[4-9]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes break fi done ;; esac fi hardcode_direct=yes hardcode_libdir_separator=':' if test "$GCC" = yes; then case $host_os in aix4.[012]|aix4.[012].*) collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && \ strings "$collect2name" | grep resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct=unsupported hardcode_minus_L=yes hardcode_libdir_flag_spec='-L$libdir' hardcode_libdir_separator= fi ;; esac fi # Begin _LT_AC_SYS_LIBPATH_AIX. echo 'int main () { return 0; }' > conftest.c ${CC} ${LDFLAGS} conftest.c -o conftest aix_libpath=`dump -H conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } }'` if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } }'` fi if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib" fi rm -f conftest.c conftest # End _LT_AC_SYS_LIBPATH_AIX. 
if test "$aix_use_runtimelinking" = yes; then hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" else if test "$host_cpu" = ia64; then hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' else hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" fi fi ;; amigaos*) case "$host_cpu" in powerpc) ;; m68k) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; bsdi[45]*) ;; cygwin* | mingw* | pw32* | cegcc*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. hardcode_libdir_flag_spec=' ' libext=lib ;; darwin* | rhapsody*) hardcode_direct=no if { case $cc_basename in ifort*) true;; *) test "$GCC" = yes;; esac; }; then : else ld_shlibs=no fi ;; dgux*) hardcode_libdir_flag_spec='-L$libdir' ;; freebsd2.[01]*) hardcode_direct=yes hardcode_minus_L=yes ;; freebsd* | dragonfly*) hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes ;; hpux9*) hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes ;; hpux10*) if test "$with_gnu_ld" = no; then hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes fi ;; hpux11*) if test "$with_gnu_ld" = no; then hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: case $host_cpu in hppa*64*|ia64*) hardcode_direct=no ;; *) hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; netbsd*) hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes ;; newsos6) hardcode_direct=yes hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; *nto* | *qnx*) ;; openbsd*) if test -f /usr/libexec/ld.so; then hardcode_direct=yes if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then hardcode_libdir_flag_spec='${wl}-rpath,$libdir' else case "$host_os" in openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) hardcode_libdir_flag_spec='-R$libdir' ;; *) hardcode_libdir_flag_spec='${wl}-rpath,$libdir' ;; esac fi else ld_shlibs=no fi ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; osf3*) hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; osf4* | osf5*) if test "$GCC" = yes; then hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' else # Both cc and cxx compiler support -rpath directly hardcode_libdir_flag_spec='-rpath $libdir' fi hardcode_libdir_separator=: ;; solaris*) hardcode_libdir_flag_spec='-R$libdir' ;; sunos4*) hardcode_libdir_flag_spec='-L$libdir' hardcode_direct=yes hardcode_minus_L=yes ;; sysv4) case $host_vendor in sni) hardcode_direct=yes # is this really true??? 
;; siemens) hardcode_direct=no ;; motorola) hardcode_direct=no #Motorola manual says yes, but my tests say they lie ;; esac ;; sysv4.3*) ;; sysv4*MP*) if test -d /usr/nec; then ld_shlibs=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) ;; sysv5* | sco3.2v5* | sco5v6*) hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' hardcode_libdir_separator=':' ;; uts4*) hardcode_libdir_flag_spec='-L$libdir' ;; *) ld_shlibs=no ;; esac fi # Check dynamic linker characteristics # Code taken from libtool.m4's _LT_SYS_DYNAMIC_LINKER. # Unlike libtool.m4, here we don't care about _all_ names of the library, but # only about the one the linker finds when passed -lNAME. This is the last # element of library_names_spec in libtool.m4, or possibly two of them if the # linker has special search rules. library_names_spec= # the last element of library_names_spec in libtool.m4 libname_spec='lib$name' case "$host_os" in aix3*) library_names_spec='$libname.a' ;; aix[4-9]*) library_names_spec='$libname$shrext' ;; amigaos*) case "$host_cpu" in powerpc*) library_names_spec='$libname$shrext' ;; m68k) library_names_spec='$libname.a' ;; esac ;; beos*) library_names_spec='$libname$shrext' ;; bsdi[45]*) library_names_spec='$libname$shrext' ;; cygwin* | mingw* | pw32* | cegcc*) shrext=.dll library_names_spec='$libname.dll.a $libname.lib' ;; darwin* | rhapsody*) shrext=.dylib library_names_spec='$libname$shrext' ;; dgux*) library_names_spec='$libname$shrext' ;; freebsd[23].*) library_names_spec='$libname$shrext$versuffix' ;; freebsd* | dragonfly*) library_names_spec='$libname$shrext' ;; gnu*) library_names_spec='$libname$shrext' ;; haiku*) library_names_spec='$libname$shrext' ;; hpux9* | hpux10* | hpux11*) case $host_cpu in ia64*) shrext=.so ;; hppa*64*) shrext=.sl ;; *) shrext=.sl ;; esac library_names_spec='$libname$shrext' ;; interix[3-9]*) library_names_spec='$libname$shrext' ;; irix5* | irix6* | nonstopux*) library_names_spec='$libname$shrext' case "$host_os" in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= ;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 ;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 ;; *) libsuff= shlibsuff= ;; esac ;; esac ;; linux*oldld* | linux*aout* | linux*coff*) ;; linux* | k*bsd*-gnu | kopensolaris*-gnu) library_names_spec='$libname$shrext' ;; knetbsd*-gnu) library_names_spec='$libname$shrext' ;; netbsd*) library_names_spec='$libname$shrext' ;; newsos6) library_names_spec='$libname$shrext' ;; *nto* | *qnx*) library_names_spec='$libname$shrext' ;; openbsd*) library_names_spec='$libname$shrext$versuffix' ;; os2*) libname_spec='$name' shrext=.dll library_names_spec='$libname.a' ;; osf3* | osf4* | osf5*) library_names_spec='$libname$shrext' ;; rdos*) ;; solaris*) library_names_spec='$libname$shrext' ;; sunos4*) library_names_spec='$libname$shrext$versuffix' ;; sysv4 | sysv4.3*) library_names_spec='$libname$shrext' ;; sysv4*MP*) library_names_spec='$libname$shrext' ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) library_names_spec='$libname$shrext' ;; tpf*) library_names_spec='$libname$shrext' ;; uts4*) library_names_spec='$libname$shrext' ;; esac sed_quote_subst='s/\(["`$\\]\)/\\\1/g' escaped_wl=`echo "X$wl" | sed -e 's/^X//' -e "$sed_quote_subst"` shlibext=`echo "$shrext" | sed -e 's,^\.,,'` escaped_libname_spec=`echo "X$libname_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` 
escaped_library_names_spec=`echo "X$library_names_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` escaped_hardcode_libdir_flag_spec=`echo "X$hardcode_libdir_flag_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` LC_ALL=C sed -e 's/^\([a-zA-Z0-9_]*\)=/acl_cv_\1=/' <. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # Please send patches to . # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. # If it is invalid, we print an error message on stderr and exit with code 1. # Otherwise, we print the canonical config type on stdout and succeed. # You can get the latest version of this script from: # https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub # This file is supposed to be the same for all GNU packages # and recognize all the CPU types, system types and aliases # that are meaningful with *any* GNU software. # Each package is responsible for reporting which valid configurations # it does not support. The user should be able to distinguish # a failure to support a valid configuration from a meaningless # configuration. # The goal of this file is to map all the various variations of a given # machine specification into a single specification in the form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM # or in some cases, the newer four-part form: # CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM # It is wrong to echo any other type of specification. me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS Canonicalize a configuration name. Options: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.sub ($timestamp) Copyright 1992-2018 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" exit 1 ;; *local*) # First pass through any local machine types. echo "$1" exit ;; * ) break ;; esac done case $# in 0) echo "$me: missing argument$help" >&2 exit 1;; 1) ;; *) echo "$me: too many arguments$help" >&2 exit 1;; esac # Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). # Here we must recognize all the valid KERNEL-OS combinations. 
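# Illustrative usage (a sketch, not part of the upstream script): given an
# alias or partial triplet, this script prints the canonical form on stdout,
# or an error on stderr with exit code 1 for an invalid configuration, e.g.
#
#   ./Makeup/ac-aux/config.sub i686-linux      # prints something like i686-pc-linux-gnu
#   ./Makeup/ac-aux/config.sub x86_64-linux    # prints something like x86_64-pc-linux-gnu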
maybe_os=`echo "$1" | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` case $maybe_os in nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ kopensolaris*-gnu* | cloudabi*-eabi* | \ storm-chaos* | os2-emx* | rtmk-nova*) os=-$maybe_os basic_machine=`echo "$1" | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` ;; android-linux) os=-linux-android basic_machine=`echo "$1" | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown ;; *) basic_machine=`echo "$1" | sed 's/-[^-]*$//'` if [ "$basic_machine" != "$1" ] then os=`echo "$1" | sed 's/.*-/-/'` else os=; fi ;; esac ### Let's recognize common machines as not being operating systems so ### that things like config.sub decstation-3100 work. We also ### recognize some manufacturers as not being operating systems, so we ### can provide default operating systems below. case $os in -sun*os*) # Prevent following clause from handling this invalid input. ;; -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ -apple | -axis | -knuth | -cray | -microblaze*) os= basic_machine=$1 ;; -bluegene*) os=-cnk ;; -sim | -cisco | -oki | -wec | -winbond) os= basic_machine=$1 ;; -scout) ;; -wrs) os=-vxworks basic_machine=$1 ;; -chorusos*) os=-chorusos basic_machine=$1 ;; -chorusrdb) os=-chorusrdb basic_machine=$1 ;; -hiux*) os=-hiuxwe2 ;; -sco6) os=-sco5v6 basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -sco5) os=-sco3.2v5 basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -sco4) os=-sco3.2v4 basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -sco3.2.[4-9]*) os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -sco3.2v[4-9]*) # Don't forget version if it is 3.2v4 or newer. basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -sco5v6*) # Don't forget version if it is 3.2v4 or newer. basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -sco*) os=-sco3.2v2 basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -udk*) basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -isc) os=-isc2.2 basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -clix*) basic_machine=clipper-intergraph ;; -isc*) basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -lynx*178) os=-lynxos178 ;; -lynx*5) os=-lynxos5 ;; -lynx*) os=-lynxos ;; -ptx*) basic_machine=`echo "$1" | sed -e 's/86-.*/86-sequent/'` ;; -psos*) os=-psos ;; -mint | -mint[0-9]*) basic_machine=m68k-atari os=-mint ;; esac # Decode aliases for certain CPU-COMPANY combinations. case $basic_machine in # Recognize the basic CPU types without company name. # Some are omitted here because they have special meanings below. 
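# Illustrative note only: each bare CPU name in the big list below simply has
# "-unknown" appended (e.g. "mips" should become "mips-unknown"), while the
# i*86/x86_64 entries further down use "-pc" instead (so "i686" should become
# "i686-pc").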
1750a | 580 \ | a29k \ | aarch64 | aarch64_be \ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ | am33_2.0 \ | arc | arceb \ | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ | avr | avr32 \ | ba \ | be32 | be64 \ | bfin \ | c4x | c8051 | clipper \ | d10v | d30v | dlx | dsp16xx \ | e2k | epiphany \ | fido | fr30 | frv | ft32 \ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | hexagon \ | i370 | i860 | i960 | ia16 | ia64 \ | ip2k | iq2000 \ | k1om \ | le32 | le64 \ | lm32 \ | m32c | m32r | m32rle | m68000 | m68k | m88k \ | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ | mips | mipsbe | mipseb | mipsel | mipsle \ | mips16 \ | mips64 | mips64el \ | mips64octeon | mips64octeonel \ | mips64orion | mips64orionel \ | mips64r5900 | mips64r5900el \ | mips64vr | mips64vrel \ | mips64vr4100 | mips64vr4100el \ | mips64vr4300 | mips64vr4300el \ | mips64vr5000 | mips64vr5000el \ | mips64vr5900 | mips64vr5900el \ | mipsisa32 | mipsisa32el \ | mipsisa32r2 | mipsisa32r2el \ | mipsisa32r6 | mipsisa32r6el \ | mipsisa64 | mipsisa64el \ | mipsisa64r2 | mipsisa64r2el \ | mipsisa64r6 | mipsisa64r6el \ | mipsisa64sb1 | mipsisa64sb1el \ | mipsisa64sr71k | mipsisa64sr71kel \ | mipsr5900 | mipsr5900el \ | mipstx39 | mipstx39el \ | mn10200 | mn10300 \ | moxie \ | mt \ | msp430 \ | nds32 | nds32le | nds32be \ | nios | nios2 | nios2eb | nios2el \ | ns16k | ns32k \ | open8 | or1k | or1knd | or32 \ | pdp10 | pj | pjl \ | powerpc | powerpc64 | powerpc64le | powerpcle \ | pru \ | pyramid \ | riscv32 | riscv64 \ | rl78 | rx \ | score \ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ | sh64 | sh64le \ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ | spu \ | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ | ubicom32 \ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ | visium \ | wasm32 \ | x86 | xc16x | xstormy16 | xtensa \ | z8k | z80) basic_machine=$basic_machine-unknown ;; c54x) basic_machine=tic54x-unknown ;; c55x) basic_machine=tic55x-unknown ;; c6x) basic_machine=tic6x-unknown ;; leon|leon[3-9]) basic_machine=sparc-$basic_machine ;; m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) basic_machine=$basic_machine-unknown os=-none ;; m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65) ;; ms1) basic_machine=mt-unknown ;; strongarm | thumb | xscale) basic_machine=arm-unknown ;; xgate) basic_machine=$basic_machine-unknown os=-none ;; xscaleeb) basic_machine=armeb-unknown ;; xscaleel) basic_machine=armel-unknown ;; # We use `pc' rather than `unknown' # because (1) that's what they normally are, and # (2) the word "unknown" tends to confuse beginning users. i*86 | x86_64) basic_machine=$basic_machine-pc ;; # Object if more than one company name word. *-*-*) echo Invalid configuration \`"$1"\': machine \`"$basic_machine"\' not recognized 1>&2 exit 1 ;; # Recognize the basic CPU types with company name. 
580-* \ | a29k-* \ | aarch64-* | aarch64_be-* \ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ | avr-* | avr32-* \ | ba-* \ | be32-* | be64-* \ | bfin-* | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* \ | c8051-* | clipper-* | craynv-* | cydra-* \ | d10v-* | d30v-* | dlx-* \ | e2k-* | elxsi-* \ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ | h8300-* | h8500-* \ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ | hexagon-* \ | i*86-* | i860-* | i960-* | ia16-* | ia64-* \ | ip2k-* | iq2000-* \ | k1om-* \ | le32-* | le64-* \ | lm32-* \ | m32c-* | m32r-* | m32rle-* \ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ | microblaze-* | microblazeel-* \ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ | mips16-* \ | mips64-* | mips64el-* \ | mips64octeon-* | mips64octeonel-* \ | mips64orion-* | mips64orionel-* \ | mips64r5900-* | mips64r5900el-* \ | mips64vr-* | mips64vrel-* \ | mips64vr4100-* | mips64vr4100el-* \ | mips64vr4300-* | mips64vr4300el-* \ | mips64vr5000-* | mips64vr5000el-* \ | mips64vr5900-* | mips64vr5900el-* \ | mipsisa32-* | mipsisa32el-* \ | mipsisa32r2-* | mipsisa32r2el-* \ | mipsisa32r6-* | mipsisa32r6el-* \ | mipsisa64-* | mipsisa64el-* \ | mipsisa64r2-* | mipsisa64r2el-* \ | mipsisa64r6-* | mipsisa64r6el-* \ | mipsisa64sb1-* | mipsisa64sb1el-* \ | mipsisa64sr71k-* | mipsisa64sr71kel-* \ | mipsr5900-* | mipsr5900el-* \ | mipstx39-* | mipstx39el-* \ | mmix-* \ | mt-* \ | msp430-* \ | nds32-* | nds32le-* | nds32be-* \ | nios-* | nios2-* | nios2eb-* | nios2el-* \ | none-* | np1-* | ns16k-* | ns32k-* \ | open8-* \ | or1k*-* \ | orion-* \ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ | pru-* \ | pyramid-* \ | riscv32-* | riscv64-* \ | rl78-* | romp-* | rs6000-* | rx-* \ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ | sparclite-* \ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ | tahoe-* \ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ | tile*-* \ | tron-* \ | ubicom32-* \ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ | vax-* \ | visium-* \ | wasm32-* \ | we32k-* \ | x86-* | x86_64-* | xc16x-* | xps100-* \ | xstormy16-* | xtensa*-* \ | ymp-* \ | z8k-* | z80-*) ;; # Recognize the basic CPU types without company name, with glob match. xtensa*) basic_machine=$basic_machine-unknown ;; # Recognize the various machine names and aliases which stand # for a CPU type and a company and sometimes even an OS. 
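# A few worked examples of the alias decoding that follows (comments only; the
# expected results assume no explicit OS was given, so the per-machine defaults
# applied later in this script fill in the OS part):
#   ./config.sub amd64     should print  x86_64-pc-none
#   ./config.sub sun4os4   should print  sparc-sun-sunos4
#   ./config.sub vax       should print  vax-dec-ultrix4.2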
386bsd) basic_machine=i386-pc os=-bsd ;; 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) basic_machine=m68000-att ;; 3b*) basic_machine=we32k-att ;; a29khif) basic_machine=a29k-amd os=-udi ;; abacus) basic_machine=abacus-unknown ;; adobe68k) basic_machine=m68010-adobe os=-scout ;; alliant | fx80) basic_machine=fx80-alliant ;; altos | altos3068) basic_machine=m68k-altos ;; am29k) basic_machine=a29k-none os=-bsd ;; amd64) basic_machine=x86_64-pc ;; amd64-*) basic_machine=x86_64-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; amdahl) basic_machine=580-amdahl os=-sysv ;; amiga | amiga-*) basic_machine=m68k-unknown ;; amigaos | amigados) basic_machine=m68k-unknown os=-amigaos ;; amigaunix | amix) basic_machine=m68k-unknown os=-sysv4 ;; apollo68) basic_machine=m68k-apollo os=-sysv ;; apollo68bsd) basic_machine=m68k-apollo os=-bsd ;; aros) basic_machine=i386-pc os=-aros ;; asmjs) basic_machine=asmjs-unknown ;; aux) basic_machine=m68k-apple os=-aux ;; balance) basic_machine=ns32k-sequent os=-dynix ;; blackfin) basic_machine=bfin-unknown os=-linux ;; blackfin-*) basic_machine=bfin-`echo "$basic_machine" | sed 's/^[^-]*-//'` os=-linux ;; bluegene*) basic_machine=powerpc-ibm os=-cnk ;; c54x-*) basic_machine=tic54x-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; c55x-*) basic_machine=tic55x-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; c6x-*) basic_machine=tic6x-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; c90) basic_machine=c90-cray os=-unicos ;; cegcc) basic_machine=arm-unknown os=-cegcc ;; convex-c1) basic_machine=c1-convex os=-bsd ;; convex-c2) basic_machine=c2-convex os=-bsd ;; convex-c32) basic_machine=c32-convex os=-bsd ;; convex-c34) basic_machine=c34-convex os=-bsd ;; convex-c38) basic_machine=c38-convex os=-bsd ;; cray | j90) basic_machine=j90-cray os=-unicos ;; craynv) basic_machine=craynv-cray os=-unicosmp ;; cr16 | cr16-*) basic_machine=cr16-unknown os=-elf ;; crds | unos) basic_machine=m68k-crds ;; crisv32 | crisv32-* | etraxfs*) basic_machine=crisv32-axis ;; cris | cris-* | etrax*) basic_machine=cris-axis ;; crx) basic_machine=crx-unknown os=-elf ;; da30 | da30-*) basic_machine=m68k-da30 ;; decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) basic_machine=mips-dec ;; decsystem10* | dec10*) basic_machine=pdp10-dec os=-tops10 ;; decsystem20* | dec20*) basic_machine=pdp10-dec os=-tops20 ;; delta | 3300 | motorola-3300 | motorola-delta \ | 3300-motorola | delta-motorola) basic_machine=m68k-motorola ;; delta88) basic_machine=m88k-motorola os=-sysv3 ;; dicos) basic_machine=i686-pc os=-dicos ;; djgpp) basic_machine=i586-pc os=-msdosdjgpp ;; dpx20 | dpx20-*) basic_machine=rs6000-bull os=-bosx ;; dpx2*) basic_machine=m68k-bull os=-sysv3 ;; e500v[12]) basic_machine=powerpc-unknown os=$os"spe" ;; e500v[12]-*) basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'` os=$os"spe" ;; ebmon29k) basic_machine=a29k-amd os=-ebmon ;; elxsi) basic_machine=elxsi-elxsi os=-bsd ;; encore | umax | mmax) basic_machine=ns32k-encore ;; es1800 | OSE68k | ose68k | ose | OSE) basic_machine=m68k-ericsson os=-ose ;; fx2800) basic_machine=i860-alliant ;; genix) basic_machine=ns32k-ns ;; gmicro) basic_machine=tron-gmicro os=-sysv ;; go32) basic_machine=i386-pc os=-go32 ;; h3050r* | hiux*) basic_machine=hppa1.1-hitachi os=-hiuxwe2 ;; h8300hms) basic_machine=h8300-hitachi os=-hms ;; h8300xray) basic_machine=h8300-hitachi os=-xray ;; h8500hms) basic_machine=h8500-hitachi os=-hms ;; harris) basic_machine=m88k-harris os=-sysv3 ;; hp300-*) basic_machine=m68k-hp ;; hp300bsd) 
basic_machine=m68k-hp os=-bsd ;; hp300hpux) basic_machine=m68k-hp os=-hpux ;; hp3k9[0-9][0-9] | hp9[0-9][0-9]) basic_machine=hppa1.0-hp ;; hp9k2[0-9][0-9] | hp9k31[0-9]) basic_machine=m68000-hp ;; hp9k3[2-9][0-9]) basic_machine=m68k-hp ;; hp9k6[0-9][0-9] | hp6[0-9][0-9]) basic_machine=hppa1.0-hp ;; hp9k7[0-79][0-9] | hp7[0-79][0-9]) basic_machine=hppa1.1-hp ;; hp9k78[0-9] | hp78[0-9]) # FIXME: really hppa2.0-hp basic_machine=hppa1.1-hp ;; hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) # FIXME: really hppa2.0-hp basic_machine=hppa1.1-hp ;; hp9k8[0-9][13679] | hp8[0-9][13679]) basic_machine=hppa1.1-hp ;; hp9k8[0-9][0-9] | hp8[0-9][0-9]) basic_machine=hppa1.0-hp ;; hppaosf) basic_machine=hppa1.1-hp os=-osf ;; hppro) basic_machine=hppa1.1-hp os=-proelf ;; i370-ibm* | ibm*) basic_machine=i370-ibm ;; i*86v32) basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` os=-sysv32 ;; i*86v4*) basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` os=-sysv4 ;; i*86v) basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` os=-sysv ;; i*86sol2) basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` os=-solaris2 ;; i386mach) basic_machine=i386-mach os=-mach ;; vsta) basic_machine=i386-unknown os=-vsta ;; iris | iris4d) basic_machine=mips-sgi case $os in -irix*) ;; *) os=-irix4 ;; esac ;; isi68 | isi) basic_machine=m68k-isi os=-sysv ;; leon-*|leon[3-9]-*) basic_machine=sparc-`echo "$basic_machine" | sed 's/-.*//'` ;; m68knommu) basic_machine=m68k-unknown os=-linux ;; m68knommu-*) basic_machine=m68k-`echo "$basic_machine" | sed 's/^[^-]*-//'` os=-linux ;; magnum | m3230) basic_machine=mips-mips os=-sysv ;; merlin) basic_machine=ns32k-utek os=-sysv ;; microblaze*) basic_machine=microblaze-xilinx ;; mingw64) basic_machine=x86_64-pc os=-mingw64 ;; mingw32) basic_machine=i686-pc os=-mingw32 ;; mingw32ce) basic_machine=arm-unknown os=-mingw32ce ;; miniframe) basic_machine=m68000-convergent ;; *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) basic_machine=m68k-atari os=-mint ;; mips3*-*) basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'` ;; mips3*) basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'`-unknown ;; monitor) basic_machine=m68k-rom68k os=-coff ;; morphos) basic_machine=powerpc-unknown os=-morphos ;; moxiebox) basic_machine=moxie-unknown os=-moxiebox ;; msdos) basic_machine=i386-pc os=-msdos ;; ms1-*) basic_machine=`echo "$basic_machine" | sed -e 's/ms1-/mt-/'` ;; msys) basic_machine=i686-pc os=-msys ;; mvs) basic_machine=i370-ibm os=-mvs ;; nacl) basic_machine=le32-unknown os=-nacl ;; ncr3000) basic_machine=i486-ncr os=-sysv4 ;; netbsd386) basic_machine=i386-unknown os=-netbsd ;; netwinder) basic_machine=armv4l-rebel os=-linux ;; news | news700 | news800 | news900) basic_machine=m68k-sony os=-newsos ;; news1000) basic_machine=m68030-sony os=-newsos ;; news-3600 | risc-news) basic_machine=mips-sony os=-newsos ;; necv70) basic_machine=v70-nec os=-sysv ;; next | m*-next) basic_machine=m68k-next case $os in -nextstep* ) ;; -ns2*) os=-nextstep2 ;; *) os=-nextstep3 ;; esac ;; nh3000) basic_machine=m68k-harris os=-cxux ;; nh[45]000) basic_machine=m88k-harris os=-cxux ;; nindy960) basic_machine=i960-intel os=-nindy ;; mon960) basic_machine=i960-intel os=-mon960 ;; nonstopux) basic_machine=mips-compaq os=-nonstopux ;; np1) basic_machine=np1-gould ;; neo-tandem) basic_machine=neo-tandem ;; nse-tandem) basic_machine=nse-tandem ;; nsr-tandem) basic_machine=nsr-tandem ;; nsv-tandem) basic_machine=nsv-tandem ;; nsx-tandem) basic_machine=nsx-tandem ;; op50n-* | op60c-*) 
basic_machine=hppa1.1-oki os=-proelf ;; openrisc | openrisc-*) basic_machine=or32-unknown ;; os400) basic_machine=powerpc-ibm os=-os400 ;; OSE68000 | ose68000) basic_machine=m68000-ericsson os=-ose ;; os68k) basic_machine=m68k-none os=-os68k ;; pa-hitachi) basic_machine=hppa1.1-hitachi os=-hiuxwe2 ;; paragon) basic_machine=i860-intel os=-osf ;; parisc) basic_machine=hppa-unknown os=-linux ;; parisc-*) basic_machine=hppa-`echo "$basic_machine" | sed 's/^[^-]*-//'` os=-linux ;; pbd) basic_machine=sparc-tti ;; pbb) basic_machine=m68k-tti ;; pc532 | pc532-*) basic_machine=ns32k-pc532 ;; pc98) basic_machine=i386-pc ;; pc98-*) basic_machine=i386-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentium | p5 | k5 | k6 | nexgen | viac3) basic_machine=i586-pc ;; pentiumpro | p6 | 6x86 | athlon | athlon_*) basic_machine=i686-pc ;; pentiumii | pentium2 | pentiumiii | pentium3) basic_machine=i686-pc ;; pentium4) basic_machine=i786-pc ;; pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) basic_machine=i586-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentiumpro-* | p6-* | 6x86-* | athlon-*) basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentium4-*) basic_machine=i786-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pn) basic_machine=pn-gould ;; power) basic_machine=power-ibm ;; ppc | ppcbe) basic_machine=powerpc-unknown ;; ppc-* | ppcbe-*) basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; ppcle | powerpclittle) basic_machine=powerpcle-unknown ;; ppcle-* | powerpclittle-*) basic_machine=powerpcle-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; ppc64) basic_machine=powerpc64-unknown ;; ppc64-*) basic_machine=powerpc64-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; ppc64le | powerpc64little) basic_machine=powerpc64le-unknown ;; ppc64le-* | powerpc64little-*) basic_machine=powerpc64le-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; ps2) basic_machine=i386-ibm ;; pw32) basic_machine=i586-unknown os=-pw32 ;; rdos | rdos64) basic_machine=x86_64-pc os=-rdos ;; rdos32) basic_machine=i386-pc os=-rdos ;; rom68k) basic_machine=m68k-rom68k os=-coff ;; rm[46]00) basic_machine=mips-siemens ;; rtpc | rtpc-*) basic_machine=romp-ibm ;; s390 | s390-*) basic_machine=s390-ibm ;; s390x | s390x-*) basic_machine=s390x-ibm ;; sa29200) basic_machine=a29k-amd os=-udi ;; sb1) basic_machine=mipsisa64sb1-unknown ;; sb1el) basic_machine=mipsisa64sb1el-unknown ;; sde) basic_machine=mipsisa32-sde os=-elf ;; sei) basic_machine=mips-sei os=-seiux ;; sequent) basic_machine=i386-sequent ;; sh5el) basic_machine=sh5le-unknown ;; simso-wrs) basic_machine=sparclite-wrs os=-vxworks ;; sps7) basic_machine=m68k-bull os=-sysv2 ;; spur) basic_machine=spur-unknown ;; st2000) basic_machine=m68k-tandem ;; stratus) basic_machine=i860-stratus os=-sysv4 ;; strongarm-* | thumb-*) basic_machine=arm-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; sun2) basic_machine=m68000-sun ;; sun2os3) basic_machine=m68000-sun os=-sunos3 ;; sun2os4) basic_machine=m68000-sun os=-sunos4 ;; sun3os3) basic_machine=m68k-sun os=-sunos3 ;; sun3os4) basic_machine=m68k-sun os=-sunos4 ;; sun4os3) basic_machine=sparc-sun os=-sunos3 ;; sun4os4) basic_machine=sparc-sun os=-sunos4 ;; sun4sol2) basic_machine=sparc-sun os=-solaris2 ;; sun3 | sun3-*) basic_machine=m68k-sun ;; sun4) basic_machine=sparc-sun ;; sun386 | sun386i | roadrunner) basic_machine=i386-sun ;; sv1) basic_machine=sv1-cray os=-unicos ;; symmetry) 
basic_machine=i386-sequent os=-dynix ;; t3e) basic_machine=alphaev5-cray os=-unicos ;; t90) basic_machine=t90-cray os=-unicos ;; tile*) basic_machine=$basic_machine-unknown os=-linux-gnu ;; tx39) basic_machine=mipstx39-unknown ;; tx39el) basic_machine=mipstx39el-unknown ;; toad1) basic_machine=pdp10-xkl os=-tops20 ;; tower | tower-32) basic_machine=m68k-ncr ;; tpf) basic_machine=s390x-ibm os=-tpf ;; udi29k) basic_machine=a29k-amd os=-udi ;; ultra3) basic_machine=a29k-nyu os=-sym1 ;; v810 | necv810) basic_machine=v810-nec os=-none ;; vaxv) basic_machine=vax-dec os=-sysv ;; vms) basic_machine=vax-dec os=-vms ;; vpp*|vx|vx-*) basic_machine=f301-fujitsu ;; vxworks960) basic_machine=i960-wrs os=-vxworks ;; vxworks68) basic_machine=m68k-wrs os=-vxworks ;; vxworks29k) basic_machine=a29k-wrs os=-vxworks ;; w65*) basic_machine=w65-wdc os=-none ;; w89k-*) basic_machine=hppa1.1-winbond os=-proelf ;; x64) basic_machine=x86_64-pc ;; xbox) basic_machine=i686-pc os=-mingw32 ;; xps | xps100) basic_machine=xps100-honeywell ;; xscale-* | xscalee[bl]-*) basic_machine=`echo "$basic_machine" | sed 's/^xscale/arm/'` ;; ymp) basic_machine=ymp-cray os=-unicos ;; none) basic_machine=none-none os=-none ;; # Here we handle the default manufacturer of certain CPU types. It is in # some cases the only manufacturer, in others, it is the most popular. w89k) basic_machine=hppa1.1-winbond ;; op50n) basic_machine=hppa1.1-oki ;; op60c) basic_machine=hppa1.1-oki ;; romp) basic_machine=romp-ibm ;; mmix) basic_machine=mmix-knuth ;; rs6000) basic_machine=rs6000-ibm ;; vax) basic_machine=vax-dec ;; pdp11) basic_machine=pdp11-dec ;; we32k) basic_machine=we32k-att ;; sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) basic_machine=sh-unknown ;; cydra) basic_machine=cydra-cydrome ;; orion) basic_machine=orion-highlevel ;; orion105) basic_machine=clipper-highlevel ;; mac | mpw | mac-mpw) basic_machine=m68k-apple ;; pmac | pmac-mpw) basic_machine=powerpc-apple ;; *-unknown) # Make sure to match an already-canonicalized machine name. ;; *) echo Invalid configuration \`"$1"\': machine \`"$basic_machine"\' not recognized 1>&2 exit 1 ;; esac # Here we canonicalize certain aliases for manufacturers. case $basic_machine in *-digital*) basic_machine=`echo "$basic_machine" | sed 's/digital.*/dec/'` ;; *-commodore*) basic_machine=`echo "$basic_machine" | sed 's/commodore.*/cbm/'` ;; *) ;; esac # Decode manufacturer-specific aliases for certain operating systems. if [ x"$os" != x"" ] then case $os in # First match some system type aliases that might get confused # with valid system types. # -solaris* is a basic system type, with this one exception. -auroraux) os=-auroraux ;; -solaris1 | -solaris1.*) os=`echo $os | sed -e 's|solaris1|sunos4|'` ;; -solaris) os=-solaris2 ;; -unixware*) os=-sysv4.2uw ;; -gnu/linux*) os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` ;; # es1800 is here to avoid being matched by es* (a different OS) -es1800*) os=-ose ;; # Now accept the basic system types. # The portable systems comes first. # Each alternative MUST end in a * to match a version number. # -sysv* is not here because it comes later, after sysvr4. 
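# For illustration only (comments, nothing executed here), the aliases handled
# just above mean that:
#   sparc-sun-solaris     should canonicalise to  sparc-sun-solaris2
#   i686-pc-gnu/linux     should canonicalise to  i686-pc-linux-gnu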
-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ | -sym* | -kopensolaris* | -plan9* \ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ | -aos* | -aros* | -cloudabi* | -sortix* \ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ | -hiux* | -knetbsd* | -mirbsd* | -netbsd* \ | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ | -chorusos* | -chorusrdb* | -cegcc* | -glidix* \ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ | -linux-newlib* | -linux-musl* | -linux-uclibc* \ | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* \ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ | -morphos* | -superux* | -rtmk* | -windiss* \ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ | -onefs* | -tirtos* | -phoenix* | -fuchsia* | -redox* | -bme* \ | -midnightbsd*) # Remember, each alternative MUST END IN *, to match a version number. ;; -qnx*) case $basic_machine in x86-* | i*86-*) ;; *) os=-nto$os ;; esac ;; -nto-qnx*) ;; -nto*) os=`echo $os | sed -e 's|nto|nto-qnx|'` ;; -sim | -xray | -os68k* | -v88r* \ | -windows* | -osx | -abug | -netware* | -os9* \ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) ;; -mac*) os=`echo "$os" | sed -e 's|mac|macos|'` ;; -linux-dietlibc) os=-linux-dietlibc ;; -linux*) os=`echo $os | sed -e 's|linux|linux-gnu|'` ;; -sunos5*) os=`echo "$os" | sed -e 's|sunos5|solaris2|'` ;; -sunos6*) os=`echo "$os" | sed -e 's|sunos6|solaris3|'` ;; -opened*) os=-openedition ;; -os400*) os=-os400 ;; -wince*) os=-wince ;; -utek*) os=-bsd ;; -dynix*) os=-bsd ;; -acis*) os=-aos ;; -atheos*) os=-atheos ;; -syllable*) os=-syllable ;; -386bsd) os=-bsd ;; -ctix* | -uts*) os=-sysv ;; -nova*) os=-rtmk-nova ;; -ns2) os=-nextstep2 ;; -nsk*) os=-nsk ;; # Preserve the version number of sinix5. -sinix5.*) os=`echo $os | sed -e 's|sinix|sysv|'` ;; -sinix*) os=-sysv4 ;; -tpf*) os=-tpf ;; -triton*) os=-sysv3 ;; -oss*) os=-sysv3 ;; -svr4*) os=-sysv4 ;; -svr3) os=-sysv3 ;; -sysvr4) os=-sysv4 ;; # This must come after -sysvr4. -sysv*) ;; -ose*) os=-ose ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) os=-mint ;; -zvmoe) os=-zvmoe ;; -dicos*) os=-dicos ;; -pikeos*) # Until real need of OS specific support for # particular features comes up, bare metal # configurations are quite functional. case $basic_machine in arm*) os=-eabi ;; *) os=-elf ;; esac ;; -nacl*) ;; -ios) ;; -none) ;; *) # Get rid of the `-' at the beginning of $os. os=`echo $os | sed 's/[^-]*-//'` echo Invalid configuration \`"$1"\': system \`"$os"\' not recognized 1>&2 exit 1 ;; esac else # Here we handle the default operating systems that come with various machines. # The value should be what the vendor currently ships out the door with their # machine or put another way, the most popular os provided with the machine. 
# Note that if you're going to try to match "-MANUFACTURER" here (say, # "-sun"), then you have to tell the case statement up towards the top # that MANUFACTURER isn't an operating system. Otherwise, code above # will signal an error saying that MANUFACTURER isn't an operating # system, and we'll never get to this point. case $basic_machine in score-*) os=-elf ;; spu-*) os=-elf ;; *-acorn) os=-riscix1.2 ;; arm*-rebel) os=-linux ;; arm*-semi) os=-aout ;; c4x-* | tic4x-*) os=-coff ;; c8051-*) os=-elf ;; hexagon-*) os=-elf ;; tic54x-*) os=-coff ;; tic55x-*) os=-coff ;; tic6x-*) os=-coff ;; # This must come before the *-dec entry. pdp10-*) os=-tops20 ;; pdp11-*) os=-none ;; *-dec | vax-*) os=-ultrix4.2 ;; m68*-apollo) os=-domain ;; i386-sun) os=-sunos4.0.2 ;; m68000-sun) os=-sunos3 ;; m68*-cisco) os=-aout ;; mep-*) os=-elf ;; mips*-cisco) os=-elf ;; mips*-*) os=-elf ;; or32-*) os=-coff ;; *-tti) # must be before sparc entry or we get the wrong os. os=-sysv3 ;; sparc-* | *-sun) os=-sunos4.1.1 ;; pru-*) os=-elf ;; *-be) os=-beos ;; *-ibm) os=-aix ;; *-knuth) os=-mmixware ;; *-wec) os=-proelf ;; *-winbond) os=-proelf ;; *-oki) os=-proelf ;; *-hp) os=-hpux ;; *-hitachi) os=-hiux ;; i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) os=-sysv ;; *-cbm) os=-amigaos ;; *-dg) os=-dgux ;; *-dolphin) os=-sysv3 ;; m68k-ccur) os=-rtu ;; m88k-omron*) os=-luna ;; *-next) os=-nextstep ;; *-sequent) os=-ptx ;; *-crds) os=-unos ;; *-ns) os=-genix ;; i370-*) os=-mvs ;; *-gould) os=-sysv ;; *-highlevel) os=-bsd ;; *-encore) os=-bsd ;; *-sgi) os=-irix ;; *-siemens) os=-sysv4 ;; *-masscomp) os=-rtu ;; f30[01]-fujitsu | f700-fujitsu) os=-uxpv ;; *-rom68k) os=-coff ;; *-*bug) os=-coff ;; *-apple) os=-macos ;; *-atari*) os=-mint ;; *) os=-none ;; esac fi # Here we handle the case where we know the os, and the CPU type, but not the # manufacturer. We pick the logical manufacturer. vendor=unknown case $basic_machine in *-unknown) case $os in -riscix*) vendor=acorn ;; -sunos*) vendor=sun ;; -cnk*|-aix*) vendor=ibm ;; -beos*) vendor=be ;; -hpux*) vendor=hp ;; -mpeix*) vendor=hp ;; -hiux*) vendor=hitachi ;; -unos*) vendor=crds ;; -dgux*) vendor=dg ;; -luna*) vendor=omron ;; -genix*) vendor=ns ;; -mvs* | -opened*) vendor=ibm ;; -os400*) vendor=ibm ;; -ptx*) vendor=sequent ;; -tpf*) vendor=ibm ;; -vxsim* | -vxworks* | -windiss*) vendor=wrs ;; -aux*) vendor=apple ;; -hms*) vendor=hitachi ;; -mpw* | -macos*) vendor=apple ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) vendor=atari ;; -vos*) vendor=stratus ;; esac basic_machine=`echo "$basic_machine" | sed "s/unknown/$vendor/"` ;; esac echo "$basic_machine$os" exit # Local variables: # eval: (add-hook 'write-file-functions 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: bit-babbler-0.9/Makeup/ac-aux/install-sh0000755000000000000000000003577614136173163015050 0ustar #!/bin/sh # install - install a program, script, or datafile scriptversion=2020-11-14.01; # UTC # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the # following copyright and license. 
# # Copyright (C) 1994 X Consortium # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- # TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # Except as contained in this notice, the name of the X Consortium shall not # be used in advertising or otherwise to promote the sale, use or other deal- # ings in this Software without prior written authorization from the X Consor- # tium. # # # FSF changes to this file are in the public domain. # # Calling this script install-sh is preferred over install.sh, to prevent # 'make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. tab=' ' nl=' ' IFS=" $tab$nl" # Set DOITPROG to "echo" to test this script. doit=${DOITPROG-} doit_exec=${doit:-exec} # Put in absolute file names if you don't have them in your path; # or use environment vars. chgrpprog=${CHGRPPROG-chgrp} chmodprog=${CHMODPROG-chmod} chownprog=${CHOWNPROG-chown} cmpprog=${CMPPROG-cmp} cpprog=${CPPROG-cp} mkdirprog=${MKDIRPROG-mkdir} mvprog=${MVPROG-mv} rmprog=${RMPROG-rm} stripprog=${STRIPPROG-strip} posix_mkdir= # Desired mode of installed file. mode=0755 # Create dirs (including intermediate dirs) using mode 755. # This is like GNU 'install' as of coreutils 8.32 (2020). mkdir_umask=22 backupsuffix= chgrpcmd= chmodcmd=$chmodprog chowncmd= mvcmd=$mvprog rmcmd="$rmprog -f" stripcmd= src= dst= dir_arg= dst_arg= copy_on_change=false is_target_a_directory=possibly usage="\ Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE or: $0 [OPTION]... SRCFILES... DIRECTORY or: $0 [OPTION]... -t DIRECTORY SRCFILES... or: $0 [OPTION]... -d DIRECTORIES... In the 1st form, copy SRCFILE to DSTFILE. In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. In the 4th, create DIRECTORIES. Options: --help display this help and exit. --version display version info and exit. -c (ignored) -C install only if different (preserve data modification time) -d create directories instead of installing files. -g GROUP $chgrpprog installed files to GROUP. -m MODE $chmodprog installed files to MODE. -o USER $chownprog installed files to USER. -p pass -p to $cpprog. -s $stripprog installed files. -S SUFFIX attempt to back up existing files, with suffix SUFFIX. -t DIRECTORY install into DIRECTORY. -T report an error if DSTFILE is a directory. Environment variables override the default commands: CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG By default, rm is invoked with -f; when overridden with RMPROG, it's up to you to specify -f if you want it. 
If -S is not specified, no backups are attempted. Email bug reports to bug-automake@gnu.org. Automake home page: https://www.gnu.org/software/automake/ " while test $# -ne 0; do case $1 in -c) ;; -C) copy_on_change=true;; -d) dir_arg=true;; -g) chgrpcmd="$chgrpprog $2" shift;; --help) echo "$usage"; exit $?;; -m) mode=$2 case $mode in *' '* | *"$tab"* | *"$nl"* | *'*'* | *'?'* | *'['*) echo "$0: invalid mode: $mode" >&2 exit 1;; esac shift;; -o) chowncmd="$chownprog $2" shift;; -p) cpprog="$cpprog -p";; -s) stripcmd=$stripprog;; -S) backupsuffix="$2" shift;; -t) is_target_a_directory=always dst_arg=$2 # Protect names problematic for 'test' and other utilities. case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac shift;; -T) is_target_a_directory=never;; --version) echo "$0 $scriptversion"; exit $?;; --) shift break;; -*) echo "$0: invalid option: $1" >&2 exit 1;; *) break;; esac shift done # We allow the use of options -d and -T together, by making -d # take the precedence; this is for compatibility with GNU install. if test -n "$dir_arg"; then if test -n "$dst_arg"; then echo "$0: target directory not allowed when installing a directory." >&2 exit 1 fi fi if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. # Otherwise, the last argument is the destination. Remove it from $@. for arg do if test -n "$dst_arg"; then # $@ is not empty: it contains at least $arg. set fnord "$@" "$dst_arg" shift # fnord fi shift # arg dst_arg=$arg # Protect names problematic for 'test' and other utilities. case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac done fi if test $# -eq 0; then if test -z "$dir_arg"; then echo "$0: no input file specified." >&2 exit 1 fi # It's OK to call 'install-sh -d' without argument. # This can happen when creating conditional directories. exit 0 fi if test -z "$dir_arg"; then if test $# -gt 1 || test "$is_target_a_directory" = always; then if test ! -d "$dst_arg"; then echo "$0: $dst_arg: Is not a directory." >&2 exit 1 fi fi fi if test -z "$dir_arg"; then do_exit='(exit $ret); exit $ret' trap "ret=129; $do_exit" 1 trap "ret=130; $do_exit" 2 trap "ret=141; $do_exit" 13 trap "ret=143; $do_exit" 15 # Set umask so as not to create temps with too-generous modes. # However, 'strip' requires both read and write access to temps. case $mode in # Optimize common cases. *644) cp_umask=133;; *755) cp_umask=22;; *[0-7]) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw='% 200' fi cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; *) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw=,u+rw fi cp_umask=$mode$u_plus_rw;; esac fi for src do # Protect names problematic for 'test' and other utilities. case $src in -* | [=\(\)!]) src=./$src;; esac if test -n "$dir_arg"; then dst=$src dstdir=$dst test -d "$dstdir" dstdir_status=$? # Don't chown directories that already exist. if test $dstdir_status = 0; then chowncmd="" fi else # Waiting for this to be detected by the "$cpprog $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if test ! -f "$src" && test ! -d "$src"; then echo "$0: $src does not exist." >&2 exit 1 fi if test -z "$dst_arg"; then echo "$0: no destination specified." >&2 exit 1 fi dst=$dst_arg # If destination is a directory, append the input filename. 
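# For instance (comment only): "install-sh -c -m 644 foo.conf /etc/myapp", with
# /etc/myapp an existing directory, should install /etc/myapp/foo.conf, whereas
# adding -T would make the same invocation fail because the destination is a
# directory.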
if test -d "$dst"; then if test "$is_target_a_directory" = never; then echo "$0: $dst_arg: Is a directory" >&2 exit 1 fi dstdir=$dst dstbase=`basename "$src"` case $dst in */) dst=$dst$dstbase;; *) dst=$dst/$dstbase;; esac dstdir_status=0 else dstdir=`dirname "$dst"` test -d "$dstdir" dstdir_status=$? fi fi case $dstdir in */) dstdirslash=$dstdir;; *) dstdirslash=$dstdir/;; esac obsolete_mkdir_used=false if test $dstdir_status != 0; then case $posix_mkdir in '') # With -d, create the new directory with the user-specified mode. # Otherwise, rely on $mkdir_umask. if test -n "$dir_arg"; then mkdir_mode=-m$mode else mkdir_mode= fi posix_mkdir=false # The $RANDOM variable is not portable (e.g., dash). Use it # here however when possible just to lower collision chance. tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ trap ' ret=$? rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" 2>/dev/null exit $ret ' 0 # Because "mkdir -p" follows existing symlinks and we likely work # directly in world-writeable /tmp, make sure that the '$tmpdir' # directory is successfully created first before we actually test # 'mkdir -p'. if (umask $mkdir_umask && $mkdirprog $mkdir_mode "$tmpdir" && exec $mkdirprog $mkdir_mode -p -- "$tmpdir/a/b") >/dev/null 2>&1 then if test -z "$dir_arg" || { # Check for POSIX incompatibilities with -m. # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or # other-writable bit of parent directory when it shouldn't. # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. test_tmpdir="$tmpdir/a" ls_ld_tmpdir=`ls -ld "$test_tmpdir"` case $ls_ld_tmpdir in d????-?r-*) different_mode=700;; d????-?--*) different_mode=755;; *) false;; esac && $mkdirprog -m$different_mode -p -- "$test_tmpdir" && { ls_ld_tmpdir_1=`ls -ld "$test_tmpdir"` test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" } } then posix_mkdir=: fi rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" else # Remove any dirs left behind by ancient mkdir implementations. rmdir ./$mkdir_mode ./-p ./-- "$tmpdir" 2>/dev/null fi trap '' 0;; esac if $posix_mkdir && ( umask $mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" ) then : else # mkdir does not conform to POSIX, # or it failed possibly due to a race condition. Create the # directory the slow way, step by step, checking for races as we go. case $dstdir in /*) prefix='/';; [-=\(\)!]*) prefix='./';; *) prefix='';; esac oIFS=$IFS IFS=/ set -f set fnord $dstdir shift set +f IFS=$oIFS prefixes= for d do test X"$d" = X && continue prefix=$prefix$d if test -d "$prefix"; then prefixes= else if $posix_mkdir; then (umask $mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break # Don't fail if two instances are running concurrently. test -d "$prefix" || exit 1 else case $prefix in *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; *) qprefix=$prefix;; esac prefixes="$prefixes '$qprefix'" fi fi prefix=$prefix/ done if test -n "$prefixes"; then # Don't fail if two instances are running concurrently. (umask $mkdir_umask && eval "\$doit_exec \$mkdirprog $prefixes") || test -d "$dstdir" || exit 1 obsolete_mkdir_used=true fi fi fi if test -n "$dir_arg"; then { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 else # Make a couple of temp file names in the proper directory. dsttmp=${dstdirslash}_inst.$$_ rmtmp=${dstdirslash}_rm.$$_ # Trap to clean up those temp files at exit. 
trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 # Copy the file name to the temp name. (umask $cp_umask && { test -z "$stripcmd" || { # Create $dsttmp read-write so that cp doesn't create it read-only, # which would cause strip to fail. if test -z "$doit"; then : >"$dsttmp" # No need to fork-exec 'touch'. else $doit touch "$dsttmp" fi } } && $doit_exec $cpprog "$src" "$dsttmp") && # and set any options; do chmod last to preserve setuid bits. # # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $cpprog $src $dsttmp" command. # { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && # If -C, don't bother to copy if it wouldn't change the file. if $copy_on_change && old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && set -f && set X $old && old=:$2:$4:$5:$6 && set X $new && new=:$2:$4:$5:$6 && set +f && test "$old" = "$new" && $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 then rm -f "$dsttmp" else # If $backupsuffix is set, and the file being installed # already exists, attempt a backup. Don't worry if it fails, # e.g., if mv doesn't support -f. if test -n "$backupsuffix" && test -f "$dst"; then $doit $mvcmd -f "$dst" "$dst$backupsuffix" 2>/dev/null fi # Rename the file to the real destination. $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || # The rename failed, perhaps because mv can't rename something else # to itself, or perhaps because mv is so ancient that it does not # support -f. { # Now remove or move aside any old file at destination location. # We try this two ways since rm can't unlink itself on some # systems and the destination file might be busy for other # reasons. In this case, the final cleanup might fail but the new # file should still install successfully. { test ! -f "$dst" || $doit $rmcmd "$dst" 2>/dev/null || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && { $doit $rmcmd "$rmtmp" 2>/dev/null; :; } } || { echo "$0: cannot unlink or rename $dst" >&2 (exit 1); exit 1 } } && # Now rename the file to the real destination. $doit $mvcmd "$dsttmp" "$dst" } fi || exit 1 trap '' 0 fi done # Local variables: # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: bit-babbler-0.9/Makeup/ac-fragments/0002755000000000000000000000000014136173163014215 5ustar bit-babbler-0.9/Makeup/ac-fragments/acsubst.sysctl0000644000000000000000000000023514125243667017127 0ustar # @configure_input@ # # Makeup makefile boilerplate for sysctl configuration support. # Copyright 2018, Ron Lee SYSCTL_DIR = @SYSCTL_DIR@ bit-babbler-0.9/Makeup/ac-fragments/acsubst.systemd0000644000000000000000000000022414125243667017274 0ustar # @configure_input@ # # Makeup makefile boilerplate for systemd. # Copyright 2018, Ron Lee SYSTEMD_UNIT_DIR = @SYSTEMD_UNIT_DIR@ bit-babbler-0.9/Makeup/ac-fragments/acsubst.udev0000644000000000000000000000032514125243667016551 0ustar # @configure_input@ # # Makeup makefile boilerplate for udev. 
# Copyright 2010 - 2018, Ron Lee UDEV_CPPFLAGS = @UDEV_CPPFLAGS@ UDEV_LIBS = @UDEV_LIBS@ UDEV_RULES_DIR = @UDEV_RULES_DIR@ bit-babbler-0.9/Makeup/ac-fragments/configure.i18n0000644000000000000000000001607514125243667016713 0ustar dnl Makeup i18n configure boilerplate. dnl dnl Copyright 2003 - 2021, Ron dnl dnl This file is distributed under the terms of the GNU GPL version 2. dnl dnl As a special exception to the GPL, it may be distributed without dnl modification as a part of a program using a makeup generated build dnl system, under the same distribution terms as the program itself. dnl Internal string encoding. dnl XXX Expand this to specify the actual encoding to use. dnl eg. WCHAR_T, utf8, iso88591 etc. AC_ARG_ENABLE([wide_strings], [AS_HELP_STRING([--enable-wide_strings], [use wide characters by default for internal]dnl [ string storage (default NO)])], [mu_cv_enable_wide_strings=$enableval], [mu_cv_enable_wide_strings=no]) AC_DEFINE_UNQUOTED([EM_USE_WIDE_STRINGS], [$(test "$mu_cv_enable_wide_strings" != yes)$?], [use wide characters by default for internal string storage]) AS_IF([test "$mu_cv_enable_wide_strings" = yes],[ makeup_build_flavour="${makeup_build_flavour}w" case $host in *-*-cygwin* | *-*-mingw32* ) AC_DEFINE([UNICODE],[1],[Enable Windows in 'UNICODE' mode]) AC_MSG_NOTICE([using UNICODE]) ;; esac ]) dnl On FreeBSD we need xlocale.h for strtod_l. That header is available on dnl Linux (though we shouldn't normally include it directly), but is not dnl available for mingw, and possibly some other platforms too (since it is dnl not a standard header at this time). AC_CHECK_HEADERS([xlocale.h]) dnl Windows has it's own incarnations of newlocale/strtod_l, and some platforms dnl like OpenBSD (as of 6.1) don't implement the POSIX newlocale at all, let dnl alone the xlocale extensions which use it. All we can really do for those dnl is fall back to using strtod, and if the user is in a locale where the dnl decimal point is a comma or something similar, they get to keep the pieces dnl or will need to override their default locale for running this. AC_CHECK_FUNCS([newlocale]) dnl The _create_locale function needs a later C runtime than the default. AS_IF([test "$ac_cv_func_newlocale" = yes],[], [ACM_PUSH_VAR([$0],[LIBS])dnl ACM_ADD_LIBS([LIBS],[-lmsvcr110]) AC_CHECK_FUNCS([_create_locale]) ACM_POP_VAR([$0],[LIBS])dnl ]) AC_CHECK_FUNCS([strtod_l _strtod_l], [break]) AS_IF([test "$ac_cv_func_strtod_l" = yes],[], [test "$ac_cv_func__strtod_l" = yes],[], [AC_MSG_WARN([No localisable strtod available on this system.]) AC_MSG_WARN([Binaries will need to be run in the "C" locale.])]) dnl String encoding conversion. AC_ARG_WITH([iconv], [AS_HELP_STRING([--with-iconv], [use iconv (from glibc or libiconv) for string encoding]dnl [ conversion (default YES)])], [mu_cv_with_iconv=$withval], [mu_cv_with_iconv=yes]) dnl Localised string substitution. AC_ARG_WITH([gettext], [AS_HELP_STRING([--with-gettext], [use gettext (from glibc or libintl) to localise selected]dnl [ literal strings (default YES)])], [mu_cv_with_gettext=$withval], [mu_cv_with_gettext=yes]) dnl Some of these are covered by AM_GNU_GETTEXT now. dnl AC_ARG_VAR([XGETTEXT],[xgettext command]) dnl AC_ARG_VAR([MSGMERGE],[msgmerge command]) dnl AC_ARG_VAR([MSGFMT],[msgfmt command]) dnl A couple still are not. 
AC_ARG_VAR([XGETTEXT_ARGS],[xgettext arguments]) AC_ARG_VAR([MSGINIT],[msginit command]) AC_ARG_VAR([ALL_LINGUAS],[The list of supported ISO 639 language codes]) AC_ARG_VAR([GETTEXT_MSG_SRC],[Limit the search for messages to $(GETTEXT_MSG_SRC)/]) dnl This one is needed for the AC_LIB_RPATH macro, required by AM_GNU_GETTEXT. FIND_AND_LINK_IF_LOCAL([config.rpath],[${ac_aux_dir#$srcdir/}],[/usr/share/gettext]) dnl This is needed because AM_ICONV_LINK, which can be pulled in by either AM_ICONV dnl or by AM_GNU_GETTEXT, currently leaks memory when running some test code, which dnl means the check for "working iconv" will wrongly fail if LSan is active. We do dnl need to wrap a much larger scope with this than would be ideal, because we can't dnl know exactly where the offending test will be expanded and run. ACM_SUPPRESS_LSAN([iconv]) dnl We check this before iconv, as it _may_ do some iconv tests dnl that we will not need to repeat, or even hit the cache for. AS_IF([test "$mu_cv_with_gettext" != no],[ dnl FIXME: AM_GNU_GETTEXT pollutes CPPFLAGS with the default path dnl for mingw-cross builds... so for now: ACM_PUSH_VAR([$0],[CPPFLAGS])dnl dnl imported from /usr/share/aclocal/gettext.m4 AM_GNU_GETTEXT([external],[need-ngettext]) ACM_POP_VAR([$0],[CPPFLAGS])dnl dnl These are set by AM_GNU_GETTEXT above now. dnl AC_CHECK_TOOL(XGETTEXT, xgettext) dnl AC_CHECK_TOOL(MSGMERGE, msgmerge) dnl AC_CHECK_TOOL(MSGFMT, msgfmt) dnl This is done implicitly by linking to a built test. dnl AC_CHECK_HEADERS(libintl.h) dnl Making this test useless. dnl if test "$ac_cv_header_libintl_h" != yes; then dnl and we must do this obscenity instead AS_IF([test "$gt_cv_func_gnugettext2_libc" != yes && test "$gt_cv_func_gnugettext2_libintl" != yes],[ AC_MSG_WARN([gettext not supported on this platform. disabling.]) mu_cv_with_gettext=no ],[ dnl These we must still do for ourself. AC_CHECK_TOOL([MSGINIT],[msginit],[:]) XGETTEXT_ARGS="-C -k_ -kP_:1,2 -s" ACM_ADD_LIBS([I18N_LIBS],[\$(LIBINTL)]) dnl Do we really want to always add this unconditionally here? ACM_ADD_LIBS([LIBS],[$LIBINTL]) ]) ]) AC_DEFINE_UNQUOTED([EM_USE_GETTEXT], [$(test "$mu_cv_with_gettext" != yes)$?], [use gettext to localise selected literal strings]) dnl String encoding conversion. AS_IF([test "$mu_cv_with_iconv" != no],[ dnl FIXME: AM_ICONV_LINK pollutes CPPFLAGS with the default path dnl for mingw-cross builds... so for now: ACM_PUSH_VAR([$0],[CPPFLAGS])dnl dnl imported from /usr/share/aclocal/iconv.m4 AM_ICONV ACM_POP_VAR([$0],[CPPFLAGS])dnl dnl and a couple of useful things it does not do. AS_IF([test "$am_cv_func_iconv" = yes],[ case $host in *-*-cygwin* | *-*-mingw32* ) mu_path_iconv=iconv ;; * ) AC_PATH_PROG([mu_path_iconv],[iconv]) ;; esac AS_IF([test -n "$mu_path_iconv"],[ AC_DEFINE_UNQUOTED([ICONV_UTIL_PATH], ["$mu_path_iconv"], [Define this with the path to the iconv utility]) ]) AC_DEFINE_UNQUOTED([HAVE_ICONV_CONST], [$(test "$am_cv_proto_iconv_arg1" != const)$?], [The system iconv requires a const char** second argument]) ACM_ADD_LIBS([I18N_LIBS],[\$(LIBICONV)]) dnl Do we really want to always add this unconditionally too? ACM_ADD_LIBS([LIBS],[$LIBICONV]) ]) ]) ACM_RESTORE_LSAN([iconv]) bit-babbler-0.9/Makeup/ac-fragments/configure.stdtools0000644000000000000000000010370714125243667020006 0ustar dnl Makeup configure boilerplate. dnl dnl Copyright 2003 - 2021, Ron dnl dnl This file is distributed under the terms of the GNU GPL version 2. 
dnl dnl As a special exception to the GPL, it may be distributed without dnl modification as a part of a program using a makeup generated build dnl system, under the same distribution terms as the program itself. # Check standard args. AC_ARG_ENABLE([pipe], [AS_HELP_STRING([--enable-pipe], [use pipes instead of temporary files for]dnl [ faster compilation (default yes)])], [mu_cv_enable_pipe=$enableval], [mu_cv_enable_pipe=yes]) AC_ARG_ENABLE([optimisation], [AS_HELP_STRING([--enable-optimisation], [use compiler optimisation flags (default yes)])], [mu_cv_enable_optimisation=$enableval], [mu_cv_enable_optimisation=yes]) AC_ARG_ENABLE([debug], [AS_HELP_STRING([--enable-debug], [enable extra debug code (default yes)])], [mu_cv_enable_debug=$enableval], [mu_cv_enable_debug=yes]) AC_ARG_ENABLE([profile], [AS_HELP_STRING([--enable-profile], [use profiling flags (default no)])], [mu_cv_enable_profiling=$enableval], [mu_cv_enable_profiling=no]) AC_ARG_ENABLE([extra_warnings], [AS_HELP_STRING([--enable-extra_warnings], [use extra compiler warnings (default yes)])], [mu_cv_enable_extra_warnings=$enableval], [mu_cv_enable_extra_warnings=yes]) AC_ARG_ENABLE([werror], [AS_HELP_STRING([--enable-werror], [fail on compile warnings, (default yes for]dnl [ release builds, no for debug builds)])], [ mu_cv_enable_fail_on_warning=$enableval], [ AS_IF([test "$mu_cv_enable_debug" = yes],[ mu_cv_enable_fail_on_warning=no ],[ dnl mu_cv_enable_fail_on_warning=yes dnl Basic tests like for iconv crap out right now dnl with warnings as errors. Disable temporarily. mu_cv_enable_fail_on_warning=no ]) ]) AC_ARG_ENABLE([valgrind_friendly], [AS_HELP_STRING([--enable-valgrind_friendly], [do extra cleanup to be valgrind clean (default no)])], [mu_cv_enable_valgrind_friendly=$enableval], [mu_cv_enable_valgrind_friendly=no]) AC_ARG_ENABLE([bison_deprecated_warnings], [AS_HELP_STRING([--enable-bison_deprecated_warnings], [let bison3 bark about deprecated bison2 constructs]dnl [ (default no)])], [mu_cv_enable_bison_deprecated_warnings=$enableval], [mu_cv_enable_bison_deprecated_warnings=no]) AC_ARG_ENABLE([code_suggestions], [AS_HELP_STRING([--enable-code_suggestions], [let the compiler suggest optimisation and safety]dnl [ changes (default yes)])], [mu_cv_enable_code_suggestions=$enableval], [mu_cv_enable_code_suggestions=yes]) AC_ARG_ENABLE([clang_almost_everything], [AS_HELP_STRING([--enable-clang_almost_everything@<:@=version@:>@], [build with most of clang's -Weverything warnings,]dnl [ optionally specifying the clang version to use]dnl [ (default no)])], [mu_cv_enable_clang_almost_everything=$enableval], [mu_cv_enable_clang_almost_everything=no]) AS_IF([test "$mu_cv_enable_clang_almost_everything" = yes], [mu_cv_prog_cc=${mu_cv_prog_cc:-clang} mu_cv_prog_cxx=${mu_cv_prog_cxx:-clang++}], [test "$mu_cv_enable_clang_almost_everything" != no], [mu_cv_prog_cc=${mu_cv_prog_cc:-clang-$mu_cv_enable_clang_almost_everything} mu_cv_prog_cxx=${mu_cv_prog_cxx:-clang++-$mu_cv_enable_clang_almost_everything}] ) dnl With _FORTIFY_SOURCE=2, it is possible that some conforming programs may fail, dnl but we'll default to the strongest tests and let the individual projects back dnl off from that if needed. It requires optimisation to be enabled, and glibc dnl since version 2.16 will warn if it is used with -O0, so we don't enable it by dnl default if --disable-optimisation was used. 
The Debian glibc patches out that dnl warning and just silently disables it if not building with optimisation, but dnl we still don't want to have other distro users nagged by that. AC_ARG_ENABLE([fortify_source], [AS_HELP_STRING([--enable-fortify_source@<:@=N@:>@], [compile with -D_FORTIFY_SOURCE=N (default 2]dnl [ if optimisation is enabled)])], [AS_IF([test "$enableval" = yes], [mu_cv_enable_fortify_source=2], [mu_cv_enable_fortify_source=$enableval]) ], [AS_IF([test "$mu_cv_enable_optimisation" = no], [mu_cv_enable_fortify_source=no], [mu_cv_enable_fortify_source=2]) ]) AC_ARG_ENABLE([stack_protector], [AS_HELP_STRING([--enable-stack_protector@<:@=option@:>@], [build with stack protection guards (default strong),]dnl [ may be set to 'strong', 'all', 'explicit', or a]dnl [ numeric value for the ssp-buffer-size parameter])], [AS_IF([test "$enableval" = yes], [mu_cv_enable_stack_protector=strong], [mu_cv_enable_stack_protector=$enableval]) ], [mu_cv_enable_stack_protector=strong]) AC_ARG_ENABLE([relro], [AS_HELP_STRING([--enable-relro], [make process memory read-only after relocation]dnl [ where possible (default yes)])], [mu_cv_enable_relro=$enableval], [mu_cv_enable_relro=yes]) dnl This is usually only a benefit when combined with relro, so link their defaults. AC_ARG_ENABLE([bind_now], [AS_HELP_STRING([--enable-bind_now], [resolve all symbols at process startup so that]dnl [ they can be included in relro (default yes if]dnl [ relro is enabled)])], [mu_cv_enable_bind_now=$enableval], [mu_cv_enable_bind_now=$mu_cv_enable_relro]) dnl Default -fsanitize= options to use with --enable-san. dnl The float-divide-by-zero and float-cast-overflow options are enabled by dnl -fsanitize=undefined with Clang, but GCC does not enable those by default. dnl It should be harmless to enable them explicitly on Clang though. m4_pushdef([mu_default_san_options], [[address,undefined,float-divide-by-zero,float-cast-overflow,integer,nullability]])dnl dnl m4_pushdef([mu_default_tsan_options],[[thread,undefined,integer,nullability]])dnl dnl AC_ARG_ENABLE([san], [AS_HELP_STRING([--enable-san@<:@=sanitizer,...@:>@], [build with runtime sanitiser support (default no), ]dnl [pass a comma-separated list of sanitizers, else ]dnl ["]mu_default_san_options[" will be used])], [AS_IF([test "$enableval" = yes], [mu_cv_enable_san="mu_default_san_options"], [mu_cv_enable_san=$enableval]) ], [mu_cv_enable_san=no]) dnl TSan can't be enabled together with ASan, so give it its own shortcut option. AC_ARG_ENABLE([tsan], [AS_HELP_STRING([--enable-tsan], [shortcut option for ]dnl [--enable-san=]mu_default_tsan_options)], [AS_IF([test "$enableval" = yes], [mu_cv_enable_san="mu_default_tsan_options"], [mu_cv_enable_san=$enableval]) ]) dnl dnl We don't need these anymore, so don't pollute the global namespace with them. m4_popdef([mu_default_tsan_options],[mu_default_san_options])dnl dnl ASan at least is not currently compatible with the _FORTIFY_SOURCE checks, dnl so disable that when the sanitisers are going to be used. 
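dnl Purely illustrative examples of the hardening/sanitiser options above (the
dnl spellings follow the AS_HELP_STRING text; pick whatever suits the build):
dnl   ./configure --enable-san                         use the default sanitiser list
dnl   ./configure --enable-san=address,undefined       select specific sanitisers
dnl   ./configure --enable-tsan                        ThreadSanitizer instead of ASan
dnl   ./configure --enable-stack_protector=all --disable-fortify_source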
AS_IF([test "$mu_cv_enable_san" != no],[mu_cv_enable_fortify_source=no]) AC_ARG_ENABLE([shared], [AS_HELP_STRING([--enable-shared], [use dynamic linking (default yes)])], [mu_cv_enable_shared=$enableval], [mu_cv_enable_shared=yes]) AC_ARG_ENABLE([static], [AS_HELP_STRING([--enable-static], [use static linking (default no)])], [ AS_IF([test "$enableval" = yes],[mu_cv_enable_shared=no]) ]) dnl These are separately precious because overriding {C,CXX}FLAGS should not dnl normally mask the C/C++ standard that a project is built with, and that dnl might not be an immediately obvious consequence of setting them explictly. dnl If you really want to override that, do it with these (or by changing the dnl PACKAGE_{C,XX}STD set for the project), which likewise will also preserve dnl whatever other compiler flags would normally be used. AC_ARG_VAR([C_STANDARD], [flags to set the compiler C standard to use]) AC_ARG_VAR([CXX_STANDARD], [flags to set the compiler C++ standard to use]) dnl Not all platforms have GCC as their default compiler anymore, even if it is dnl still available by default. Autoconf still prefers to use GCC by default dnl in the AC_PROG_{CC,CXX} tests though. These variables let the search order dnl be explicitly specified by the user, and let us automatically tweak it for dnl different platforms. AC_ARG_VAR([CC_SEARCH], [space-separated list of which C compiler to prefer]) AC_ARG_VAR([CXX_SEARCH], [space-separated list of which C++ compiler to prefer]) AC_ARG_VAR([RC_SEP], [a hack for excluding windows resource files]) AC_ARG_VAR([ARFLAGS], [options passed to ar]) AC_ARG_VAR([YACCFLAGS], [options passed to bison/yacc]) AC_ARG_VAR([LEXFLAGS], [options passed to flex/lex]) AC_ARG_VAR([PICFLAGS], [extra flags for building dynamically linked object files]) AC_ARG_VAR([HOST_PICFLAGS], [the PICFLAGS needed for the intended host system]) AC_ARG_VAR([PTHREAD_CPPFLAGS], [C/C++ preprocessor flags for thread-safe builds]) AC_ARG_VAR([PTHREAD_LDFLAGS], [C/C++ linker flags for thread-safe builds]) dnl the EXTRAFOO variables allow appending additional flags without completely dnl overriding the normal default set (and/or having to specify them manually dnl just to add some additional option. AC_ARG_VAR([EXTRACPPFLAGS], [extra C preprocessor flags]) AC_ARG_VAR([EXTRACFLAGS], [extra C compiler flags]) AC_ARG_VAR([EXTRACXXFLAGS], [extra C++ compiler flags]) AC_ARG_VAR([EXTRALDFLAGS], [extra linker flags]) AC_ARG_VAR([EXTRAYACCFLAGS], [extra options passed to bison/yacc]) AC_ARG_VAR([EXTRALEXFLAGS], [extra options passed to flex/lex]) AC_ARG_VAR([EXTRALIBS], [extra libraries (to link before LIBS)]) AC_ARG_VAR([MAKEUP_HOST_ARCH], [architecture that targets should be built for]) AC_ARG_VAR([MAKEUP_DEFAULT_LINKAGE], [default linkage for binary targets]) AC_ARG_VAR([DSOEXT], [filename extension for dynamic libraries]) dnl On Linux, FHS 3.0 introduced /run to replace the /var/run directory, dnl FreeBSD and OpenBSD use a similar spec that is documented in hier(7), dnl but they still use /var/run at present. AC_ARG_VAR([SYSTEM_RUNDIR], [System directory for run-time variable data]) dnl Fully expanded paths for the standard installation directories. 
dnl Normally you should try to avoid using these, and instead use the standard dnl variables for them directly - but there are a few cases, such as paths in dnl configuration files, or in compiled or generated source files, where the dnl normal secondary expansions that some of these contain will not, or cannot dnl occur, and so the fully expanded path, using the value of $exec_prefix and dnl $prefix at configure time must be used instead. ACM_EXPAND_DIR([EXP_PREFIX],[$prefix]) ACM_EXPAND_DIR([EXP_EXEC_PREFIX],[$exec_prefix]) ACM_EXPAND_DIR([EXP_BINDIR],[$bindir]) ACM_EXPAND_DIR([EXP_SBINDIR],[$sbindir]) ACM_EXPAND_DIR([EXP_INCLUDEDIR],[$includedir]) ACM_EXPAND_DIR([EXP_LIBDIR],[$libdir]) ACM_EXPAND_DIR([EXP_DATADIR],[$datadir]) ACM_EXPAND_DIR([EXP_DOCDIR],[$docdir]) ACM_EXPAND_DIR([EXP_MANDIR],[$mandir]) ACM_EXPAND_DIR([EXP_LOCALEDIR],[$localedir]) # Oddly enough, the most preferred compiler is a platform specific thing, not a # universal truth. Who could have guessed ... dnl Keeping this list current with the changing Winds of Whim could become a dnl rather tedious and fragile thing, so it's tempting to default to checking dnl for cc and c++ first everywhere, on the assumption that all modern systems dnl now have that as an alias to their actually preferred toolchains, but that dnl has the downside of making it less obvious exactly which compiler is being dnl used, and making it even more fragile if some user has changed it from what dnl the normal platform default would otherwise be ... So let's see how this dnl goes for a while. At present the platform needing this most is OpenBSD, dnl since it still ships an ancient "last of the GPLv2" gcc in its base set, dnl but actually has clang as its default and preferred compiler. case $host in *-*-openbsd* | *-*-freebsd* | *-*-darwin* ) dnl OpenBSD (as of 6.2) still has GCC 4.2.1 installed in its base set, dnl but "defaults" to clang (which is what /usr/bin/cc points to), so dnl test for a working clang before gcc there. dnl dnl FreeBSD 11 considers clang to be its default compiler, and though dnl it ships with gcc 5-7, there seem to be an ever increasing number dnl of ways in which the GCC toolchain there is broken. We already dnl had workarounds for broken optimisation there (see the platform dnl specific toolchain tests below), and now we find that the version dnl of binutils it is using is known to be broken with --enable-relro dnl too (https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=219035#c4) dnl so stop swimming against the tide there and default it to clang. dnl dnl Likewise for MacOS, it being both a fork of FreeBSD and having a dnl near existential dread of the letters GPL. AS_IF([test -z "$CC_SEARCH"],[CC_SEARCH="clang gcc cc"]) AS_IF([test -z "$CXX_SEARCH"],[CXX_SEARCH="clang++ g++ c++"]) ;; * ) dnl By default, do what autoconf would otherwise do and prefer GCC, dnl except our second choice is clang (which it entirely ignores), dnl and we don't bother looking for the obscure C++ compilers which dnl it would check for if it doesn't find g++ or c++. When someone dnl proves they want them, and that they can compile our code, then dnl we can revise this list to add them. 
dnl dnl Ideally, we'd have defaulted to calling AC_PROG_{CC,CXX} with an dnl empty argument, and just let it do its own default thing, but that dnl macro is too broken to enable that, it checks if the argument is dnl empty during the m4 pass, so it considers an empty variable to be dnl an explicit list (and then fails at runtime with no compilers to dnl check) - and we can't AS_IF it and call it either with or without dnl arguments at runtime, because there are tests in there which will dnl only expand once, and so everything falls apart when they are only dnl expanded in the dead branch ... The assumption that it will only dnl ever appear once in one code path goes deep there. AS_IF([test -z "$CC_SEARCH"],[CC_SEARCH="gcc clang cc"]) AS_IF([test -z "$CXX_SEARCH"],[CXX_SEARCH="g++ clang++ c++"]) ;; esac SYSTEM_RUNDIR="/var/run" RC_SEP="#" case $host in *-*-linux* ) dnl Used in the make subsystem for rule selection. MAKEUP_HOST_ARCH="ELF" dnl Used to define the link names to the config header. makeup_build_platform="linux" DSOEXT=".so" dnl Don't do this by default anymore. It went out of vogue with gcc3. dnl CPPFLAGS="$CPPFLAGS -DUSE_GCC_PRAGMA" HOST_PICFLAGS="-fPIC" SYSTEM_RUNDIR="/run" ;; *-*-*bsd* | *-*-darwin* ) MAKEUP_HOST_ARCH="ELF" makeup_build_platform="bsd" DSOEXT=".so" HOST_PICFLAGS="-fPIC" ;; *-*-cygwin* | *-*-mingw32* ) MAKEUP_HOST_ARCH="PE" makeup_build_platform="msw" DSOEXT=".dll" HOST_PICFLAGS="-D_DLL=1 -D_WINDLL=1" AC_ARG_VAR([WINRCFLAGS], [options passed to windres]) WINRCFLAGS="--include-dir /usr/$host_alias/include" AS_IF([test -n "$mu_cv_with_wx_build_dir"],[ WINRCFLAGS="$WINRCFLAGS --include-dir $mu_cv_with_wx_build_dir/../include" ]) WINRCFLAGS="$WINRCFLAGS --define __WIN32__ --define __WIN95__ --define __GNUWIN32__" RC_SEP= ;; * ) AC_MSG_ERROR([Unknown host type. Stopping.]) ;; esac dnl This one may be added to later by user config that adds flavours. dnl At this level there are only two flavours, 'd' - debug, and 'r' - release. AS_IF([test "$mu_cv_enable_debug" = yes],[makeup_build_flavour=d],[makeup_build_flavour=r]) AS_IF([test "$mu_cv_enable_shared" = yes],[ MAKEUP_DEFAULT_LINKAGE="shared" PICFLAGS="\$(HOST_PICFLAGS)" ],[ MAKEUP_DEFAULT_LINKAGE="static" ]) # Check standard tools. dnl We might be overriding these based on an option request or test. dnl But we'll still want to check that they actually work. CC=${CC:-$mu_cv_prog_cc} CXX=${CXX:-$mu_cv_prog_cxx} dnl If the user explicitly set C/CXXFLAGS, and we have an explicitly specified dnl language standard to use, then prepend that to the *FLAGS but otherwise we dnl respect the user's selection of FLAGS and don't modify them here. If they dnl are not set yet, then we set them here to stop AC_PROG_* from adding its dnl own default options to them and flag that we'll be adding our own lot once dnl we've done the basic checks for a working toolchain. 
AS_IF([test "${CFLAGS+set}" = set],[ AS_IF([test -n "$C_STANDARD"],[CFLAGS="$C_STANDARD${CFLAGS:+ $CFLAGS}"]) ],[ CFLAGS=$C_STANDARD mu_use_our_cflags=yes ]) AS_IF([test "${CXXFLAGS+set}" = set],[ AS_IF([test -n "$CXX_STANDARD"],[CXXFLAGS="$CXX_STANDARD${CXXFLAGS:+ $CXXFLAGS}"]) ],[ CXXFLAGS=$CXX_STANDARD mu_use_our_cxxflags=yes ]) AC_MSG_NOTICE([Using ${C_STANDARD:-toolchain default} C standard]) AC_MSG_NOTICE([Using ${CXX_STANDARD:-toolchain default} C++ standard]) AC_PROG_CC([$CC_SEARCH]) AC_PROG_CPP AC_PROG_CXX([$CXX_SEARCH]) AC_PROG_CXXCPP dnl If we explicitly set the C/C++ standard to use, then ensure that is passed dnl when the preprocessor is run during the tests that follow. This is a bit dnl sketchy, because really this ought to be done as part of testing for how to dnl run the preprocessor above - and there are no separate variables for the dnl preprocessor flags for C and C++, the autoconf tests just use CPPFLAGS for dnl both, which is a bit difficult when we want to specify a C or C++ standard dnl to use in mixed code. If we don't do this though, then we can see dodgy or dnl misleading test results for things like AC_CHECK_HEADERS which runs both dnl the compiler and preprocessor as separate tests. If CFLAGS or CXXFLAGS set dnl a standard to use and the preprocessor flags do not, then the results could dnl be conflicting when things which do vary according to the standard that is dnl being used are involved. Fortunately, CPP and CXXCPP generally aren't used dnl very often outside of the feature tests here, and if there is a problem it dnl will probably shake out fairly early in the first test which does use it. AS_IF([test -n "$C_STANDARD"],[CPP="$CPP $C_STANDARD"]) AS_IF([test -n "$CXX_STANDARD"],[CXXCPP="$CXXCPP $CXX_STANDARD"]) AC_PROG_LEX dnl AC_PROG_YACC dnl Do this instead, we want real bison usually and not the crippled yaccalike. AC_CHECK_TOOL([YACC],[bison],[:]) AC_PROG_RANLIB AC_CHECK_TOOL([AR],[ar],[:]) AC_CHECK_TOOL([WINDRES],[windres],[:]) AC_PROG_LN_S AC_PROG_INSTALL AC_CHECK_PROGS([LCOV],[lcov],[:]) AC_CHECK_PROGS([GENHTML],[genhtml],[:]) # Configure toolchain options. dnl These we always apply, even when the user explicitly set *FLAGS manually. AS_IF([test "$mu_cv_enable_pipe" = yes], [ACM_ADD_OPT([CFLAGS,CXXFLAGS],[-pipe])]) dnl And this one is a little special, because we have to pass it to the linker dnl as well, but we don't have separate linker flags for C and C++ which means dnl we'll always need to enable it for both together while that stays true. AS_IF([test "$mu_cv_enable_profiling" = yes], [ACM_ADD_OPT([CFLAGS,CXXFLAGS,LDFLAGS],[-pg])]) dnl Build the lists of common, C, and C++ compiler flags which we only use if dnl CFlAGS and CXXFLAGS were not explicitly overridden by the user. We don't dnl need to test for these, they should be supported by all toolchains we use. dnl It's the common options we are most interested in here, so that we do not dnl need to duplicate them and the enable logic for both CFLAGS and CXXFLAGS. mu_common_flags= mu_cflags= mu_cxxflags= AS_IF([test "$mu_cv_enable_optimisation" = yes], [ACM_ADD_OPT([mu_common_flags],[-O2])]) AS_IF([test "$mu_cv_enable_debug" = yes], [ACM_ADD_OPT([mu_common_flags],[-g])]) AS_IF([test "$mu_cv_enable_fail_on_warning" = yes], [ACM_ADD_OPT([mu_common_flags],[-Werror])]) dnl Always use -Wall unless *FLAGS is explicitly overridden. ACM_ADD_OPT([mu_common_flags],[-Wall]) dnl These are enabled by default unless --disable-extra_warnings was used. 
AS_IF([test "$mu_cv_enable_extra_warnings" = yes],[ ACM_ADD_OPT([mu_common_flags],[-Wextra, -Wpointer-arith, -Wcast-qual, -Wcast-align, -Wformat=2, -Wfloat-equal]) ACM_ADD_OPT([mu_cflags],[-Wstrict-prototypes, -Wmissing-prototypes]) ACM_ADD_OPT([mu_cxxflags],[-Woverloaded-virtual]) ]) dnl Set CFLAGS using the options from above, if the user didn't override it. AS_IF([test "$mu_use_our_cflags" = yes],[ ACM_ADD_OPT([CFLAGS],[$mu_common_flags,$mu_cflags]) ]) dnl Set CXXFLAGS using the options from above, if the user didn't override it. AS_IF([test "$mu_use_our_cxxflags" = yes],[ ACM_ADD_OPT([CXXFLAGS],[$mu_common_flags,$mu_cxxflags]) ]) dnl Nothing should need these after this, so let's get them out of the global dnl namespace again so as to be perfectly clear about that in the future. m4_foreach([var],[mu_common_flags,mu_cflags,mu_cxxflags],[ AS_UNSET([var])dnl ]) dnl This option is disabled by default and if enabled sets clang as the default dnl CC and CXX, so we still add these to *FLAGS even if the user supplied their dnl own preferred set. If they don't want these, they can just not enable them. AS_IF([test "$mu_cv_enable_clang_almost_everything" != no],[ dnl Options common to both clang and clang++ for C and C++. ACM_ADD_OPT([CFLAGS,CXXFLAGS],[-Weverything, -Wno-c99-extensions, -Wno-vla-extension, -Wno-vla, -Wno-gnu-zero-variadic-macro-arguments, -Wno-variadic-macros, -Wno-disabled-macro-expansion, -Wno-undef, -Wno-padded, -Wno-packed, -Wno-documentation-html, -Wno-documentation-unknown-command])dnl dnl (There are currently no) Extra C specific options. dnl ACM_ADD_OPT([CFLAGS],[]) dnl Extra C++ specific options. ACM_ADD_OPT([CXXFLAGS],[-Wno-c++11-long-long, -Wno-exit-time-destructors, -Wno-global-constructors, -Wno-weak-vtables, -Wno-weak-template-vtables, -Wno-shadow])dnl dnl The above assumes a baseline of clang 3.5.0 as the minimum supported dnl version. We test for the options which were added in later versions. ACM_ADD_COMPILER_WARNING([C,CXX],[no-reserved-id-macro, no-format-pedantic, no-double-promotion])dnl ACM_ADD_COMPILER_WARNING([CXX],[no-shadow-field-in-constructor])dnl dnl The "override" keyword was added in C++11, so don't whine about it dnl not being used if we are building to an earlier standards version. dnl Clang itself should know this, but as of clang 11.0.1 appears not to. AS_CASE([$CXX_STANDARD], [*++98|*++03], [ACM_ADD_COMPILER_WARNING([CXX],[no-suggest-destructor-override, no-suggest-override])]dnl )dnl ]) dnl This option enables toolchain diagnostics which suggest ways that the code dnl can be changed or annotated to enable optimisations or safety checks that dnl are probably applicable, but which the compiler cannot determine with 100% dnl certainty that all the required conditions will always be met. dnl dnl We need to test if these extra warning options are actually supported by dnl the toolchain in use, we can't safely assume that they are with this lot. dnl The -Wsuggest-attribute options are currently GCC specific. AS_IF([test "$mu_cv_enable_code_suggestions" = yes],[ ACM_ADD_COMPILER_WARNING([C,CXX],[suggest-attribute=format, suggest-attribute=const, suggest-attribute=pure, suggest-attribute=noreturn, suggest-attribute=malloc, suggest-attribute=cold]) ]) dnl We need a custom template for this one. The alternative is running the dnl compiler/pre-processor to test for it, but that seems like overkill here. AH_VERBATIM([_FORTIFY_SOURCE], [/* Build with libc buffer overflow checks enabled. 
We need to guard this, because on some platforms the toolchain will already define it as a builtin, and then emit warnings if we redefine it. Ideally, we'd undefine it here and then force our choice of strictness, but we can't do that with autoheader because it sees that as a hook to rewrite. So just let people (yes, we're looking at you Gentoo) reap what they've sown if the toolchain or the environment they use has already defined it. */ #ifndef _FORTIFY_SOURCE # undef _FORTIFY_SOURCE #endif]) AS_IF([test "$mu_cv_enable_fortify_source" != no],[ AC_DEFINE_UNQUOTED([_FORTIFY_SOURCE],[$mu_cv_enable_fortify_source]) ]) dnl We use the case here as a portable test for if this was set to a numeric dnl value, for use with the older stack protector options, or a string value dnl to use one of the newer ones. It's split between two separate case tests dnl so we can fall back to the older method if the toolchain does not support dnl the newer options. We may want to special-case that further for some dnl options like 'explicit' which aren't exactly a superset of that, but for dnl now this gives us reasonable fallback behaviour for a default of 'strong'. dnl dnl We need to include the -fstack-protector option in both compiler and linker dnl flags so that libssp will be linked in correctly on platforms where it is dnl needed because the functions it provides are not integrated with libc. AS_CASE([$mu_cv_enable_stack_protector], [no],dnl Option is disabled [], [[''|*[!0-9]*]],dnl Value is not numeric [ACM_ADD_COMPILE_LINK_OPTION([C,CXX],[-fstack-protector-$mu_cv_enable_stack_protector]) AS_VAR_PUSHDEF([spvar],[mu_cv_ldflag_-fstack-protector-$mu_cv_enable_stack_protector]) AS_VAR_IF([spvar],[yes],[],[mu_cv_enable_stack_protector=4]) AS_VAR_POPDEF([spvar]) ] ) AS_CASE([$mu_cv_enable_stack_protector], [[''|*[!0-9]*]],dnl Value is not numeric [], [*],dnl Value is numeric [ACM_ADD_COMPILE_LINK_OPTION([C,CXX], [-fstack-protector --param ssp-buffer-size=$mu_cv_enable_stack_protector]) ] ) dnl Testing for these is a bit awkward, because unknown ld -z keywords will be dnl "ignored for Solaris compatibility", but we do still need to test for them dnl because the mingw linker at least does not support the -z option ... AS_IF([test "$mu_cv_enable_relro" = yes],[ ACM_ADD_LINKER_OPTION([[-Wl,-z,relro]]) ]) AS_IF([test "$mu_cv_enable_bind_now" = yes],[ ACM_ADD_LINKER_OPTION([[-Wl,-z,now]]) ]) dnl Add the needed toolchain options for any requested runtime sanitisers. AS_IF([test "$mu_cv_enable_san" != no],[ACM_ADD_SANITIZER([$mu_cv_enable_san])]) dnl Add any platform specific toolchain flags that are generally needed. case $host in *-*-freebsd* ) AC_LANG_PUSH([C++]) dnl On FreeBSD 11, both gcc6 and gcc7 will miscompile code when the dnl -fguess-branch-probability optimisation is enabled (which it is dnl with anything above -O0). We don't currently have a trivial test dnl case for that which we can use here, but the symptom is that an dnl exception which should normally be safely caught will instead invoke dnl terminate and kill the application. It would appear that the dnl stack context for unwinding is being lost in some code paths by dnl this optimisation, since an exception thrown in one path will be dnl fine, but one right next to it in another will explode. dnl dnl So until we have some proof of it being fixed, disable that when dnl using g++.
We can't actually directly test if we are using g++, dnl because clang lies and defines all of gcc's macros, so instead dnl we can only test if we are not using clang (which would choke on dnl this test anyway, since it doesn't currently support that flag dnl in any case. ACM_PUSH_VAL([$0],[CXXFLAGS],[-fno-guess-branch-probability])dnl AC_CACHE_CHECK([if $CXX needs -fno-guess-branch-probability], [mu_cv_flag_guess_branch_probability], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[ #ifdef __clang__ using the clang compiler #endif ]] )], [mu_cv_flag_guess_branch_probability=yes], [mu_cv_flag_guess_branch_probability=no] ) ]) AS_IF([test "$mu_cv_flag_guess_branch_probability" = yes], [], [ACM_POP_VAR([$0],[CXXFLAGS])])dnl dnl And as above -freorder-blocks can cause the same symptom to manifest dnl on FreeBSD 11 with both gcc6 and gcc7, just in different places in dnl the code. ACM_PUSH_VAL([$0],[CXXFLAGS],[-fno-reorder-blocks])dnl AC_CACHE_CHECK([if $CXX needs -fno-reorder-blocks], [mu_cv_flag_reorder_blocks], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[ #ifdef __clang__ using the clang compiler #endif ]] )], [mu_cv_flag_reorder_blocks=yes], [mu_cv_flag_reorder_blocks=no] ) ]) AS_IF([test "$mu_cv_flag_reorder_blocks" = yes],[],[ACM_POP_VAR([$0],[CXXFLAGS])])dnl dnl And yet one more, that does it in yet another place. dnl If these continue to shake out, it might be safer to just build dnl with -O0 on FreeBSD 11 with gcc ... ACM_PUSH_VAL([$0],[CXXFLAGS],[-fno-tree-dominator-opts])dnl AC_CACHE_CHECK([if $CXX needs -fno-tree-dominator-opts], [mu_cv_flag_tree_dominator_opts], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[ #ifdef __clang__ using the clang compiler #endif ]] )], [mu_cv_flag_tree_dominator_opts=yes], [mu_cv_flag_tree_dominator_opts=no] ) ]) AS_IF([test "$mu_cv_flag_tree_dominator_opts" = yes], [], [ACM_POP_VAR([$0],[CXXFLAGS])])dnl AC_LANG_POP([C++]) ;; esac PTHREAD_CPPFLAGS="-pthread" PTHREAD_LDFLAGS="-pthread" ACM_PUSH_VAL([$0],[CPPFLAGS],[$PTHREAD_CPPFLAGS])dnl AC_CACHE_CHECK([if _REENTRANT is defined by the compiler], [mu_cv_have_reentrant], [AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[ #ifndef _REENTRANT #error "_REENTRANT was not defined" #endif ]]) ], [mu_cv_have_reentrant=yes], [mu_cv_have_reentrant=no] )] ) ACM_POP_VAR([$0],[CPPFLAGS])dnl AS_IF([test "$mu_cv_have_reentrant" != yes],[ ACM_ADD_OPT([PTHREAD_CPPFLAGS],[-D_REENTRANT]) ]) dnl add 's' here and omit ranlib from the build step ARFLAGS=rDvs dnl bison3 complains loudly about a bunch of constructs that must still be used dnl if compatibility with bison2 is required, and appears to give us no clean dnl way to deal with that at all. We can tell bison3 not to bark by passing it dnl the -Wno-deprecated option, except bison2 chokes and dies on that too ... dnl Disabling those warnings is sub-optimal, but so is scaring end-users with dnl them and/or maintaining two sets of grammar files, or autogenerating them dnl just for a couple of gratuitously renamed defines. So the least ugly option dnl we have appears to be to test for bison3 here and disable those warnings if dnl it's the version in use, unless they were enabled explicitly by the user dnl who ran configure. At least for the next few years while most of the world dnl is still using bison2 ... 
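dnl As an illustrative example (not from the original comments): with a bison 3.x
dnl binary in use and the default options, the test below typically ends up with
dnl YACCFLAGS="-d -Wno-deprecated $(EXTRAYACCFLAGS)", while with bison 2 (or no
dnl bison at all) it remains YACCFLAGS="-d $(EXTRAYACCFLAGS)".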
AS_IF([test "$mu_cv_enable_bison_deprecated_warnings" = no],[ AS_IF([test "$YACC" != ":"],[ AS_IF([$YACC -Wno-deprecated -V > /dev/null 2>&1],[ mu_yacc_flags=" -Wno-deprecated" AC_MSG_NOTICE([disabled bison3 deprecation warnings]) ]) ]) ]) YACCFLAGS="-d$mu_yacc_flags \$(EXTRAYACCFLAGS)" LEXFLAGS="\$(EXTRALEXFLAGS)" dnl This one's work here is done now too, we don't need it elsewhere. AS_UNSET([mu_yacc_flags]) dnl Macros to define in the private config header. AC_DEFINE_UNQUOTED([EMDEBUG], [$(test "$mu_cv_enable_debug" != yes)$?], [build with additional debugging code]) AC_DEFINE_UNQUOTED([EM_USE_VALGRIND_FRIENDLY], [$(test "$mu_cv_enable_valgrind_friendly" != yes)$?], [do extra cleanup to be valgrind clean]) AC_DEFINE_UNQUOTED([EM_SYSTEM_RUNDIR],["$SYSTEM_RUNDIR"], [System directory for run-time variable data]) bit-babbler-0.9/Makeup/ac-fragments/configure.sysctl0000644000000000000000000000526614125243667017455 0ustar dnl Makeup configure boilerplate. dnl dnl Copyright 2018, Ron dnl dnl This file is distributed under the terms of the GNU GPL version 2. dnl dnl As a special exception to the GPL, it may be distributed without dnl modification as a part of a program using a makeup generated build dnl system, under the same distribution terms as the program itself. dnl Only enable this by default on platforms where we expect sysctl.d to be dnl present. People can still --enable-sysctl to override that if needed. mu_expect_sysctl=no case $host in *-*-linux* ) mu_expect_sysctl=yes ;; esac dnl Option and substvar for installing sysctl snippets. AC_ARG_ENABLE([sysctl], [AS_HELP_STRING([--enable-sysctl], [install sysctl configuration files (default yes on Linux, else no)])], [mu_cv_enable_sysctl=$enableval], [mu_cv_enable_sysctl=$mu_expect_sysctl]) dnl Don't bother setting SYSCTL_DIR if we're using --disable-sysctl. It being dnl empty provides a signal to not try and install any files in that case too. dnl dnl The systemd.pc provides what it expects to be the system path for these, dnl but there's some additional hackery we need to do because systemd assumes dnl everything will be installed into the vendor space and doesn't respect an dnl alternate $prefix for local builds, hardcoding only the one which systemd dnl itself was built with. dnl dnl So right now, the least horrid thing we can do is, if we are building with dnl --prefix=/usr, then we install them to the vendor sysctl dir reported by dnl systemd.pc, otherwise we install them to $libdir/sysctl.d. If users need dnl or want something different to that, then they can override the SYSCTL_DIR dnl variable explicitly. This should work ok for --prefix=/usr/local, because dnl both systemd-sysctl and sysctl(8) will search there too, but it won't work dnl by default for other oddball values of $prefix. We could default to using dnl /etc/sysctl.d for the oddball case, but if people are using an odd prefix, dnl then they probably have their own plan for mapping that to locations which dnl will actually be searched, so for now at least, we'll respect that too. AS_IF([test "$mu_cv_enable_sysctl" = yes],[ ACM_IF_VENDOR_BUILD([ACM_PKG_CONFIG_GET_VAR([SYSCTL_DIR],[systemd], [sysctldir],[$EXP_LIBDIR/sysctl.d])], [AC_MSG_CHECKING([for SYSCTL_DIR]) SYSCTL_DIR="$EXP_LIBDIR/sysctl.d" AC_MSG_RESULT([$SYSCTL_DIR (for prefix=$EXP_PREFIX)]) ]) ]) AC_ARG_VAR([SYSCTL_DIR], [where to install sysctl configuration]) bit-babbler-0.9/Makeup/ac-fragments/configure.systemd0000644000000000000000000000641214125243667017616 0ustar dnl Makeup configure boilerplate. 
dnl dnl Copyright 2018, Ron dnl dnl This file is distributed under the terms of the GNU GPL version 2. dnl dnl As a special exception to the GPL, it may be distributed without dnl modification as a part of a program using a makeup generated build dnl system, under the same distribution terms as the program itself. dnl There are platforms where we know systemd won't be present, so don't annoy dnl their users with its cruft unless we're explicitly passed --enable-systemd. mu_expect_systemd=no case $host in *-*-linux* ) mu_expect_systemd=yes ;; esac dnl Option and substvar for installing systemd units. AC_ARG_ENABLE([systemd], [AS_HELP_STRING([--enable-systemd], [install systemd unit files (default yes on Linux, else no)])], [mu_cv_enable_systemd=$enableval], [mu_cv_enable_systemd=$mu_expect_systemd]) dnl Don't bother setting SYSTEMD_UNIT_DIR if we're using --disable-systemd. dnl It being empty provides a signal to not try and install any units in that dnl case too. dnl dnl There's some additional hackery we need to do here, because systemd has a dnl deep assumption of everything being installed into vendor space and the dnl systemd.pc it creates doesn't respect an alternative $prefix, it dnl just hardcodes the one which systemd itself was built with. dnl dnl So right now, the least horrid thing we can do is, if we are building with dnl --prefix=/usr, then we install units to the vendor unit dir reported by dnl systemd.pc, otherwise we install them to $libdir/systemd/system. If users dnl need or want something different to that then they can explicitly override dnl the SYSTEMD_UNIT_DIR variable. This should work ok for --prefix=/usr/local, dnl because systemd will search there too, but it won't work by default for any dnl other oddball values of $prefix. We could default to systemdsystemconfdir dnl (aka /etc/systemd/system) for the oddball case, but if people are using an dnl odd prefix, then they probably have their own plan for mapping that to dnl locations which will actually be searched, so for now at least, we will dnl respect that too. dnl dnl The fallback for ACM_PKG_CONFIG_GET_VAR here should probably instead be dnl $EXP_LIBDIR/systemd/system too, but on systems with "split usr" the vendor dnl units are installed in /lib, and on those with "merged usr" both /lib and dnl /usr/lib point to the same place anyway, so it's probably the safest path dnl to default to here for now if pkg-config support is not available. AS_IF([test "$mu_cv_enable_systemd" = yes],[ ACM_IF_VENDOR_BUILD([ACM_PKG_CONFIG_GET_VAR([SYSTEMD_UNIT_DIR],[systemd], [systemdsystemunitdir],[/lib/systemd/system])], [AC_MSG_CHECKING([for SYSTEMD_UNIT_DIR]) SYSTEMD_UNIT_DIR="$EXP_LIBDIR/systemd/system" AC_MSG_RESULT([$SYSTEMD_UNIT_DIR (for prefix=$EXP_PREFIX)]) ]) ]) AC_ARG_VAR([SYSTEMD_UNIT_DIR], [where to install systemd units]) AC_DEFINE_UNQUOTED([EM_USE_NOTIFY_SOCKET], [$(test "$mu_cv_enable_systemd" != yes)$?], [Build with service manager NOTIFY_SOCKET support]) bit-babbler-0.9/Makeup/ac-fragments/configure.udev0000644000000000000000000001240414125243667017067 0ustar dnl Makeup configure boilerplate. dnl dnl Copyright 2010 - 2021, Ron dnl dnl This file is distributed under the terms of the GNU GPL version 2. dnl dnl As a special exception to the GPL, it may be distributed without dnl modification as a part of a program using a makeup generated build dnl system, under the same distribution terms as the program itself.
dnl There are platforms where we know udev won't be present, so don't bother dnl testing for it at all there unless someone explicitly passes --with-udev. mu_expect_udev=no case $host in *-*-linux* ) mu_expect_udev=yes ;; esac dnl Option, check, and substvars for libudev. dnl --with-udev="rules only" is handled as a special case where libudev support dnl is not needed or wanted, but UDEV_RULES_DIR is for installing rules files. AC_ARG_WITH([udev], [AS_HELP_STRING([--with-udev], [use libudev for device detection (default yes on Linux, else no)])], [mu_cv_with_udev=$withval], [mu_cv_with_udev=$mu_expect_udev]) dnl Since udev merged with systemd, upstream claims to no longer support dnl static linking. In practice this means Wheezy was the last release dnl to ship with libudev.a, but even there support for it is crappy - as dnl it uses clock_gettime but libudev.pc does not declare the dependency dnl on librt required for that before it was moved to libc (for Jessie). dnl So now that even Wheezy-LTS is officially EOL, there's not much point dnl in us working around its brokenness, all we can do is warn users that dnl they can either have libudev or (maybe) static linking (if nothing dnl else required is also broken for that, which is far from assured in dnl these Modern Times We Live In). AS_IF([test "$mu_cv_with_udev" != no],[ AS_IF([test "$mu_cv_enable_shared" = no],[ AC_MSG_WARN([libudev does not support static linking]) mu_cv_with_udev="rules only" ], [test "$mu_cv_with_udev" != "rules only"],[ AC_CHECK_LIB([udev],[udev_new], [ AC_DEFINE([HAVE_LIBUDEV],[1],[libudev is available]) UDEV_CPPFLAGS= ACM_PKG_CONFIG_GET_LIBS([UDEV_LIBS],[libudev],[udev]) mu_cv_with_udev=yes dnl The udev_device_get_tags_list_entry function was added in udev 154 dnl and udev_device_get_sysattr_list_entry was added in udev release 167, dnl which is newer than some current distro releases are shipping still. dnl RHEL/CentOS 6 at least appears to ship udev 147 and it isn't EOL yet. ACM_PUSH_VAL([$0],[LIBS],[-l$UDEV_LIBS])dnl AC_CHECK_FUNCS([udev_device_get_tags_list_entry udev_device_get_sysattr_list_entry]) ACM_POP_VAR([$0],[LIBS])dnl ],[ AS_IF([test "$mu_cv_with_udev" != auto],[ AC_MSG_WARN([libudev not found]) ACM_ADD_MISSING_DEP([libudev-dev (or configure --without-udev)]) ]) mu_cv_with_udev=no ]) ]) ]) dnl The udev.pc only defines the udevdir root, so we still need to construct dnl the rules directory from that ourselves either way. Don't bother to set dnl UDEV_RULES_DIR if we're building --without-udev. It being empty provides dnl a signal to not try and install any rules in that case too. dnl dnl There's some additional hackery we need to do here, because as with the dnl rest of systemd, the udev.pc doesn't respect any alternative prefix, it dnl simply hardcodes what systemd/udevd themselves were built with. In the dnl case of udev it is even worse though, because it won't search alternative dnl directories to what it was built with. At present, rules are only ever dnl read from /etc, /run, or whatever UDEVLIBEXECDIR was set to when udev was dnl built (typically either /lib or /usr/lib). dnl dnl So right now, the least horrid thing we can do is, if we are building with dnl --prefix=/usr, then we install rules to the vendor rules dir reported by dnl udev.pc, otherwise we install them to the /etc/udev/rules.d directory. dnl If users need or want something different to that then they can explicitly dnl override the UDEV_DIR variable.
For --prefix=/usr/local, we can't usefully dnl install rules to $libdir/udev, because udev quite simply won't look there. dnl dnl The fallback for ACM_PKG_CONFIG_GET_VAR here should probably instead be dnl $EXP_LIBDIR/udev, but on systems with "split usr" the vendor rules are all dnl installed in /lib, and on those with "merged usr" both /lib and /usr/lib dnl point to the same place anyway, so it's probably the safest path to default dnl to here for now if pkg-config support is not available. Especially since dnl in the "split" case, udevd will only look in one of those locations. This dnl one in particular is even more awful than the usual systemd-grade mess is. AS_IF([test "$mu_cv_with_udev" != no && test -z "$UDEV_RULES_DIR"],[ ACM_IF_VENDOR_BUILD([ACM_PKG_CONFIG_GET_VAR([UDEV_DIR],[udev],[udevdir],[/lib/udev])], [AC_MSG_CHECKING([for UDEV_DIR]) UDEV_DIR="/etc/udev" AC_MSG_RESULT([$UDEV_DIR (for prefix=$EXP_PREFIX)]) ]) UDEV_RULES_DIR="$UDEV_DIR/rules.d" ]) dnl We don't usually expect people to mess with these two. AC_SUBST([UDEV_CPPFLAGS]) AC_SUBST([UDEV_LIBS]) dnl But this one they might want to override in some cases. AC_ARG_VAR([UDEV_RULES_DIR], [where to install udev rules]) bit-babbler-0.9/Makeup/ac-fragments/makeup.m40000644000000000000000000026360214125243667015755 0ustar dnl Makeup aclocal macros. dnl dnl Copyright 2003 - 2021, Ron dnl dnl These macros are distributed under the terms of the GNU GPL version 2. dnl dnl As a special exception to the GPL, it may be distributed without dnl modification as a part of a program using a makeup generated build dnl system, under the same distribution terms as the program itself. # ACM_LANG # # Expands to the currently set AC_LANG. We use this wrapper in case _AC_LANG # ever changes as the only place we can currently retrieve that value from. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_LANG], [m4_defn([_AC_LANG])]) # ACM_LANG_ABBREV # # Expands to the short signature of _AC_LANG which can be used in shell # variable names, or in M4 macro names, for the currently set AC_LANG. # We use this wrapper in case _AC_LANG_ABBREV ever changes as the only # place we can currently retrieve that value from. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_LANG_ABBREV], [_AC_LANG_ABBREV]) # ACM_LANG_PREFIX # # Expands to the short (upper case) signature of _AC_LANG that is used to # prefix environment variables like FLAGS, for the currently set AC_LANG. # We use this wrapper in case _AC_LANG_PREFIX ever changes as the only # place we can currently retrieve that value from. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_LANG_PREFIX], [_AC_LANG_PREFIX]) # ACM_LANG_COMPILER # # Expands to the currently set value for CC if AC_LANG is C, or CXX for C++, # and so on. We use this wrapper in case _AC_CC ever changes as the only # place we can currently retrieve that value from. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_LANG_COMPILER], [$[]_AC_CC]) # ACM_TR_SH_LITERAL([LITERAL]) # # Transform LITERAL into a valid shell variable name. This is similar to # AS_TR_SH except it is not polymorphic and operates strictly on the literal # passed to it without expanding any shell variables or constructs. 
We use # the same remapping, as we want strings transformed by this to be the same # as when they are passed to AS_VAR_PUSHDEF, including the oddball mapping # of * and + to 'p', and emulating the shell transformation behaviour of # stripping all backslash, single, and double quotes, before converting any # other non-alphanumeric characters to an underscore. # # It's not clear whether doing an extra translit (to strip the quotes) is less # optimal than stripping those using two replacements with m4_bpatsubsts, but # we can't use m4_bpatsubst to do the non-alphanumeric replacement, as that # needs to be overquoted to avoid expanding any macros in the literal, which # means we'll transform the final outer quotes to underscores as well, adding # extra leading and trailing characters that weren't in the original literal. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_TR_SH_LITERAL], [dnl m4_translit(m4_translit(m4_bpatsubst([[[[$1]]]],[\\\(.\)],[\1]), ['"]), [*+[]]]m4_dquote(m4_defn([m4_cr_not_symbols2]))[, [pp[]]]m4_dquote(m4_for(,1,255,,[[_]]))[)]) ]) # ACM_ADD_OPT([VARS],[ITEMS],[SEPARATOR]) # # Append each of the comma-separated ITEMS to a SEPARATOR-separated list in # each of the comma-separated VARS. If SEPARATOR is not specified, it will # default to a single space. For example: ACM_ADD_OPT([foo,bar],[fee,fie]) # is equivalent to: # foo+=" fee fie" # bar+=" fee fie" # But without the leading space if foo or bar were empty before this operation. # And then ACM_ADD_OPT([foo],[foe,fum],[, ]) would result in foo containing the # suffix string "fee fie, foe, fum". # ----------------------------------------------------------------------------- AC_DEFUN([ACM_ADD_OPT], [dnl dnl The expansion of $3 here is double quoted if it's not empty, so that it can dnl contain a comma for outputting comma-separated lists. m4_pushdef([sep],[m4_default_quoted([$3],[ ])])dnl m4_chomp(m4_foreach([var],[$1], [m4_foreach([val],[$2], [ var="${var:+$var[]AS_ESCAPE(m4_dquote(m4_expand([sep])))}m4_expand([val])"[]dnl ])dnl ])dnl )dnl m4_popdef([sep])dnl ]) # ACM_PUSH_VAR([SCOPE],[VARS],[NEW-VALUE]) # # For each comma separated variable in VARS, preserve their current value in # acm_save_SCOPE_VAR, and if NEW-VALUE is set, assign that value to it. The # previous value will be restored to each variable that ACM_POP_VAR is later # called for using the SCOPE it was pushed to. SCOPE may be any literal that # can be used to form a valid shell variable name, but when called from in a # macro, the name of the calling macro (ie. $0) is probably a sensible choice. # This is not a stack, only the last pushed value for each SCOPE used can be # restored. # ----------------------------------------------------------------------------- AC_DEFUN([ACM_PUSH_VAR], [dnl m4_foreach([var],[$2],[dnl acm_save_[]ACM_TR_SH_LITERAL([$1])_[]var=$var m4_ifval([$3],[dnl var=[$3] ])dnl ])dnl ]) # ACM_POP_VAR([SCOPE],[VARS]) # # For each comma separated variable in VARS, restore the value that was saved # by a previous call to ACM_PUSH_VAR. This is not a stack, only the last value # which was pushed for SCOPE is saved. 
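# As an illustrative example (not from the original documentation), this is the
# same save/restore pattern that ACM_EXPAND_DIR below relies on:
#
#   ACM_PUSH_VAR([$0],[prefix,exec_prefix])
#   ... temporarily reassign prefix and exec_prefix for some computation ...
#   ACM_POP_VAR([$0],[prefix,exec_prefix])
#
# which leaves both variables exactly as they were before the push.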
# ----------------------------------------------------------------------------- AC_DEFUN([ACM_POP_VAR], [dnl m4_foreach([var],[$2],[dnl var=$acm_save_[]ACM_TR_SH_LITERAL([$1])_[]var ])dnl ]) # ACM_PUSH_VAL([SCOPE],[VARS],[VALUES],[SEPARATOR]) # # For each comma separated variable in VARS, preserve their current value in # acm_save_SCOPE_VAR, then append each of the comma-separated VALUES to it as # a SEPARATOR separated list. If SEPARATOR is not specified, it will default # to a single space. The saved values can be restored for each variable if # ACM_POP_VAR is later called using the SCOPE it was pushed to. This is not # a stack, only the last pushed value for each SCOPE used can be restored. # # This is a convenience macro that is equivalent to calling ACM_PUSH_VAR # followed by ACM_ADD_OPT. # ----------------------------------------------------------------------------- AC_DEFUN([ACM_PUSH_VAL], [dnl m4_foreach([var],[$2],[dnl acm_save_[]ACM_TR_SH_LITERAL([$1])_[]var=$var m4_ifval([$3],[dnl ACM_ADD_OPT(var,[$3],[$4]) ])dnl ])dnl ]) # ACM_REPUSH_VAL([SCOPE],[VARS],[VALUES],[SEPARATOR]) # # For each comma separated variable in VARS, restore the previously pushed # value from acm_save_SCOPE_VAR, then append each of the comma-separated VALUES # to it as a SEPARATOR separated list. If SEPARATOR is not specified, it will # default to a single space. The initially pushed value will be still restored # for each variable that ACM_POP_VAR is later called for. This is not a stack, # only the initially pushed value can be restored. # # This is a convenience macro that is equivalent to calling ACM_POP_VAR # followed by ACM_PUSH_VAL. # ----------------------------------------------------------------------------- AC_DEFUN([ACM_REPUSH_VAL], [dnl m4_pushdef([sep],[m4_default_quoted([$4],[ ])])dnl m4_pushdef([scope],m4_expand([acm_save_[]ACM_TR_SH_LITERAL([$1])_]))dnl m4_foreach([var],[$2],[m4_ifval([$3],[dnl var="${scope[]var:+$scope[]var[]AS_ESCAPE(m4_dquote(m4_expand([sep])))}m4_join(sep,$3)" ],[dnl var=$scope[]var ])dnl ])dnl m4_popdef([scope],[sep])dnl ]) # ACM_FOREACH([VAR],[LIST],[IF-LITERAL],[IF-NOT]) # # Iterate over each of the comma-separated elements of LIST, assigning them to # VAR. If VAR is a literal then IF-LITERAL will be expanded. If VAR is a # shell expression, then for each of the comma-separated elements it expands to # the expansion of IF-NOT will be executed. If IF-NOT is unset, then it will # use IF-LITERAL with each instance of VAR replaced by $VAR (so it will expand # as a shell variable instead of an m4 macro) - which means you normally should # not need to set IF-NOT unless there is something additionally special which # should be done if looping in the shell. # # This allows macros to be written which behave the same at runtime regardless # of whether the comma-separated list provided to them is literal text or the # value of a shell variable. Literal elements will be unrolled by m4, while # runtime variables will be looped over in the shell, and both may be used in # LIST together. The following are all equivalent in their runtime behaviour: # # list="foo,bar" # ACM_FOREACH([var],[foo,bar],[DO_STUFF(var)]) # ACM_FOREACH([var],[$list],[DO_STUFF(var)]) # ACM_FOREACH([var],[$list],[DO_STUFF(var)],[DO_STUFF($var)]) # # And this is legal, but does the same stuff to each of foo and bar twice. 
# ACM_FOREACH([var],[foo,$list,bar],[DO_STUFF(var)]) # # Note that VAR normally should not be overquoted where it is used, most cases # would want it expanded as the argument to IF-LITERAL, and not in some later # expansion of whatever it is passed to. # ----------------------------------------------------------------------------- dnl Recursion count for scoping ACM_FOREACH expansion of macros which then also dnl use ACM_FOREACH again. m4_define([_acm_foreach_nestlevel],[0]) AC_DEFUN([ACM_FOREACH], [dnl m4_pushdef([_acm_foreach_nestlevel],m4_incr(_acm_foreach_nestlevel))dnl m4_pushdef([scope],[$0_[]_acm_foreach_nestlevel])dnl dnl In the case of IF-LITERAL handling a non-literal expression, we overquote dnl it to ensure that VAR is converted to $VAR in the text we were passed, not dnl in whatever results from some subsequent expansion of macros in it. We can dnl then safely expand any remaining macros in it after that is done. m4_expand(m4_foreach([$1],[$2],[AS_LITERAL_IF([$1],[$3],[dnl ACM_PUSH_VAR(scope,[IFS],[,])dnl acm_[$1]_list="$1" for [$1] in $acm_[$1]_list; do ACM_POP_VAR(scope,[IFS])dnl [$1]="${[$1][#]"${[$1]%%[[![:space:]]]*}"}" [$1]="${[$1]%"${[$1][##]*[[![:space:]]]}"}" m4_default_quoted([$4],m4_bpatsubst([[$3]],[$1],[$$1])) done ACM_POP_VAR(scope,[IFS])])]))dnl m4_popdef([scope],[_acm_foreach_nestlevel])dnl ]) # ACM_FOREACH_W([VAR],[LIST],[IF-LITERAL],[IF-NOT]) # # This macro is similar to ACM_FOREACH, except it expects the elements of LIST # to be space-separated. The corresponding example case would be: # # list="foo bar" # ACM_FOREACH([var],[foo $list bar],[DO_STUFF(var)]) # # Which again would do DO_STUFF to each of foo and bar twice, once as a literal # expansion and again as an operation on the expansion of the shell variable. # ----------------------------------------------------------------------------- AC_DEFUN([ACM_FOREACH_W], [dnl dnl In the case of IF-LITERAL handling a non-literal expression, we overquote dnl it to ensure that VAR is converted to $VAR in the text we were passed, not dnl in whatever results from some subsequent expansion of macros in it. We can dnl then safely expand any remaining macros in it after that is done. m4_expand(m4_foreach_w([$1],[$2],[AS_LITERAL_IF([$1],[$3],[ acm_[$1]_list="$1" for [$1] in $acm_[$1]_list; do m4_default_quoted([$4],m4_bpatsubst([[$3]],[$1],[$$1])) done])]))dnl ]) # ACM_FOREACH_REV_W([VAR],[LIST],[IF-LITERAL],[IF-NOT]) # # This macro is similar to ACM_FOREACH_W, except it operates on each element of # the space-separated LIST in the reverse order, acting on the right-most list # elements first. # ----------------------------------------------------------------------------- AC_DEFUN([ACM_FOREACH_REV_W], [dnl dnl In the case of IF-LITERAL handling a non-literal expression, we overquote dnl it to ensure that VAR is converted to $VAR in the text we were passed, not dnl in whatever results from some subsequent expansion of macros in it. We can dnl then safely expand any remaining macros in it after that is done. 
m4_expand(m4_foreach([$1],m4_dquote(m4_reverse(m4_unquote(m4_split(m4_normalize([$2]))))), [AS_LITERAL_IF([$1],[$3],[ acm_[$1]_list="$1" acm_[$1]_rev_list="" for [$1] in $acm_[$1]_list; do acm_[$1]_rev_list="$[$1] $acm_[$1]_rev_list" done for [$1] in $acm_[$1]_rev_list; do m4_default_quoted([$4],m4_bpatsubst([[$3]],[$1],[$$1])) done])]))dnl ]) # ACM_IF_CONTAINS([VAR],[ITEM],[IF-ITEM],[IF-NOT-ITEM],[SEPARATOR]) # # This macro tests if ITEM is included in the SEPARATOR-separated list that is # the current value of VAR, and executes the commands in IF-ITEM if so, else # the (optional) commands IF-NOT-ITEM. If SEPARATOR is not explicitly passed # then it defaults to checking for space-separated items. # For example: # ACM_IF_CONTAINS([FOO_LIBS],[bar],[TEST_FOR_LIBBAR]) # # Will execute the expansion of TEST_FOR_LIBBAR if FOO_LIBS includes 'bar'. # ----------------------------------------------------------------------------- AC_DEFUN([ACM_IF_CONTAINS], [dnl dnl The expansion of $5 here is double quoted if it's not empty, so that it can dnl contain a comma for checking comma-separated lists. dnl dnl Note that we don't normally need to quote the variables in case statements, dnl because its expansion won't be word-split, but we do need quotes around at dnl least the separators in this case since it may contain shell metacharacters dnl which would break things. We prepend and append the separator to the list dnl so that we can do word delimited matching including items at the beginning dnl and end of the list. We don't want to match with a partial item name here. m4_pushdef([sep],[m4_default_quoted([$5],[ ])])dnl AS_CASE(["AS_ESCAPE(m4_dquote(m4_expand([sep])))${$1}[]AS_ESCAPE(m4_dquote(m4_expand([sep])))"], [*"AS_ESCAPE(m4_dquote(m4_expand([sep])))$2[]AS_ESCAPE(m4_dquote(m4_expand([sep])))"*], [$3], [$4])dnl m4_popdef([sep])dnl ]) # ACM_IF_VENDOR_BUILD([IF-VENDOR-BUILD],[IF-LOCAL-BUILD],[IF-OTHER]) # # If this is a build intended for installation to the vendor-reserved locations # on the filesystem (i.e. with --prefix=/usr, for a distro package or similar) # then the expansion of IF-VENDOR-BUILD will be used. If we are building for a # local admin install (i.e. with --prefix=/usr/local) then IF-LOCAL-BUILD will # be used. For all other install prefixes, IF-OTHER will be used if provided, # otherwise IF-LOCAL-BUILD will be used in that case too. # # Most things will never need to use this as the desired result can usually be # obtained by simply using the $prefix variable and its derived values as they # were intended to be. This is a workaround for the things which break that # assumption and need other special handling to make them behave sanely for # local user builds. Yeah, we're looking at you systemd. Not in admiration. # ----------------------------------------------------------------------------- AC_DEFUN([ACM_IF_VENDOR_BUILD], [dnl AS_IF([test "$prefix" = "/usr" || { test "$prefix" = "NONE" && test "$ac_default_prefix" = "/usr" ; }], [$1], m4_ifval([$3],dnl [dnl [test "$prefix" = "/usr/local" || { test "$prefix" = "NONE" && test "$ac_default_prefix" = "/usr/local" ; }], [$2], [$3] ],[dnl [$2] ])dnl )dnl ]) # ACM_EXPAND_VARS([OUTVAR],[EXP]) # # Recursively expand shell expression EXP until no further expansion occurs # and assign the resulting string to OUTVAR. No checking is done to stop a # pathological expression from expanding infinitely. 
This is mostly intended # for the simple case of expanding a variable which may be defined in terms # of other variables, in the manner of the install directory variables which # are usually relative to ${prefix} and other directory root variables. # ----------------------------------------------------------------------------- AC_DEFUN([ACM_EXPAND_VARS], [dnl $1=$2 while test "$$1" != "$acm_tmp_$1"; do acm_tmp_$1=$$1 eval $1=$$1 done AS_UNSET([acm_tmp_$1])dnl ]) # ACM_EXPAND_DIR([OUTVAR],[INVAR],[DESCRIPTION]) # # This is a specialisation of ACM_EXPAND_VARS for standard install directory # variables which handles the case of ${prefix} and/or ${exec_prefix} not yet # being set (since that normally happens near the very end of the generated # configure script, just before config.status is created, unless values for # them were passed explicitly by the caller). In addition to fully expanding # INVAR and assigning that to OUTVAR, this will declare OUTVAR itself to be a # precious substitution variable, and define it in the config header. # If the DESCRIPTION parameter is passed, it will be included as the precious # variable description for ./configure --help, and as the macro description # in the configuration header. Otherwise a default description will be used # to indicate it is the expansion of INVAR. # ----------------------------------------------------------------------------- AC_DEFUN([ACM_EXPAND_DIR], [dnl ACM_PUSH_VAR([$0],[prefix,exec_prefix])dnl AS_IF([test "$prefix" = "NONE"],[prefix=$ac_default_prefix]) AS_IF([test "$exec_prefix" = "NONE"],[exec_prefix=$prefix]) ACM_EXPAND_VARS([$1],[$2])dnl m4_pushdef([desc],[m4_default_quoted([$3],[The fully expanded $2 path])])dnl AC_ARG_VAR([$1],desc) AC_DEFINE_UNQUOTED([$1],["$$1"],desc) m4_popdef([desc])dnl ACM_POP_VAR([$0],[prefix,exec_prefix])dnl ]) # ACM_PUSH_LANG_FOR_FLAGS([CALLER],[FLAGS_PREFIX]) # # Call AC_LANG_PUSH for the language that FLAGS_PREFIX is used for. Reporting # an error in the CALLER if that can't be mapped to a language identifier. # ----------------------------------------------------------------------------- AC_DEFUN([ACM_PUSH_LANG_FOR_FLAGS], [dnl m4_case([$2], [C],[AC_LANG_PUSH([C])], [CXX],[AC_LANG_PUSH([C++])], [m4_fatal([$1: unknown toolchain type '$2'])])dnl ]) # ACM_POP_LANG_FOR_FLAGS([CALLER],[FLAGS_PREFIX]) # # Call AC_LANG_POP for the language that FLAGS_PREFIX is used for. Reporting # an error in the CALLER if that can't be mapped to a language identifier. # ----------------------------------------------------------------------------- AC_DEFUN([ACM_POP_LANG_FOR_FLAGS], [dnl m4_case([$2], [C],[AC_LANG_POP([C])], [CXX],[AC_LANG_POP([C++])], [m4_fatal([$1: unknown toolchain type '$2'])])dnl ]) # ACM_ADD_UNIQUE([VARS],[ITEMS],[SEPARATOR]) # # This macro does the same thing as ACM_ADD_OPT except it will only add each # item if it isn't already present in the current list held by the variable. # Unlike ACM_ADD_OPT, it will also expand any variables in the list of ITEMS # rather than simply appending them as variables, so that it is the expanded # content which is checked for uniqueness, not the variable name. 
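# For example (an illustrative sketch, not from the original documentation):
#
#   FOO="-Wall -g"
#   ACM_ADD_UNIQUE([FOO],[-Wall,-O2])
#
# would leave FOO containing "-Wall -g -O2", since -Wall is already present in
# the list and only -O2 needs to be appended.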
# ----------------------------------------------------------------------------- AC_DEFUN([ACM_ADD_UNIQUE], [dnl m4_chomp(m4_foreach([var],[$1], [ACM_FOREACH([val],[$2], [ ACM_IF_CONTAINS([var],[val], [], [var="${var:+$var[]AS_ESCAPE(m4_dquote(m4_expand([sep])))}m4_expand([val])"], [$3])dnl ])dnl ])dnl )dnl ]) # ACM_ADD_LIBS([VARS],[LIBS]) # # This macro is similar to ACM_ADD_UNIQUE except that it operates on a space- # separated list of LIBS in reverse order (from right to left) prepending any # unique new elements to the value of each of the (comma-separated) VARS, as # a space-separated list. # # Typically, this behaviour is most useful for building up a concise list of # libraries to link with, where "high-level" libraries (with dependencies on # other libraries) must be specified to the left of all their dependencies, # and where those libraries may share common lower-level dependencies. Each # of the shared dependencies will be included only once, in an order which is # suitable for use with both static and dynamic linking. For example: # # FOO_LIBS="foo m" # BAR_LIBS="bar z m" # ACM_ADD_LIBS([MY_LIBS],[mylib $BAR_LIBS $FOO_LIBS]) # # Will result in MY_LIBS="mylib bar z foo m". The list may also be built up # progressively, so the same result would be obtained from: # ACM_ADD_LIBS([MY_LIBS],[$FOO_LIBS]) # ACM_ADD_LIBS([MY_LIBS],[$BAR_LIBS]) # ACM_ADD_LIBS([MY_LIBS],[mylib]) # # The only requirement is that each macro expansion must be fully specified, # so that none of the earlier calls implicitly depend upon libraries which # will only be listed in later additions - but ordering library tests to run # with low level library checks being done before higher level ones should # already be a fairly natural thing to do in most cases. # ----------------------------------------------------------------------------- AC_DEFUN([ACM_ADD_LIBS], [dnl m4_chomp(m4_foreach([var],[$1], [ACM_FOREACH_REV_W([val],[$2], [ ACM_IF_CONTAINS([var],[val], [], [var="m4_expand([val])${var:+ $var}"])dnl ])dnl ])dnl )dnl ]) # SHUT_THE_FUP(AC_MACRO) # # Stealth mode for AC_ macros. Exploits implementation so it's # potentially fragile, but almost all failure modes do not # affect the functional output, and those that do should be # immediately obvious if they occur :) Use it to wrap a macro # that would output something you don't want on the console. # Do NOT quote them. Will honour all other nasty hack's disclaimers # upon presentation. # ------------------------------------------------------------------- AC_DEFUN([SHUT_THE_FUP], [dnl exec 6>/dev/null $@dnl exec 6>&1 ]) # ACM_REQUIRE_LN_S # # This macro should be AC_REQUIRE'd by any macro that # may need $LN_S to be defined before it is expanded. # --------------------------------------------------- AC_DEFUN([ACM_REQUIRE_LN_S], [dnl if test -z "$LN_S"; then SHUT_THE_FUP(AC_PROG_LN_S) fi ]) # FIND_AND_LINK_IF_LOCAL([FILE][,DEST][,SOURCE_DIRS]) # # If FILE does not exist in dir DEST look for it in the immediate # parent directories and if found create a symlink to it. Else # try to find it in a space-separated list of SOURCE_DIRS (which # default to a list of standard system locations) and then copy it. # If DEST is not supplied explicitly, it will default to $srcdir, # if DEST is supplied and does not exist, it will be created too. # DEST is assumed to always be relative to $srcdir. 
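# As an illustrative sketch (the file and destination used here are only
# example placeholders, not taken from the original documentation):
#
#   FIND_AND_LINK_IF_LOCAL([config.sub],[Makeup/ac-aux])
#
# would symlink config.sub from an immediate parent directory of
# $srcdir/Makeup/ac-aux if a copy is found there, and otherwise copy it in
# from the default system locations such as /usr/share/misc.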
# --------------------------------------------------------------- AC_DEFUN([FIND_AND_LINK_IF_LOCAL], [dnl AC_REQUIRE([ACM_REQUIRE_LN_S])dnl dnl if test -n "[$2]"; then _filedest=$srcdir/[$2] mkdir -p $_filedest else _filedest=$srcdir fi if test -n "[$3]"; then _filesources="[$3]" else _filesources="/usr/share/misc /usr/share/automake* /usr/share/libtool" fi if test ! -e "$_filedest/[$1]" ; then AC_MSG_CHECKING([for $_filedest/[$1]]) ( cd $_filedest for d in ".." "../.." ; do if test -r "$d/[$1]" ; then AC_MSG_RESULT([linking from $d/[$1].]) $LN_S "$d/[$1]" . break fi done ) if test ! -e "$_filedest/[$1]" ; then for d in $_filesources; do if test -r "$d/[$1]" ; then AC_MSG_RESULT([copying from $d/[$1].]) cp -a "$d/[$1]" "$_filedest/[$1]" break fi done fi if test ! -e "$_filedest/[$1]" ; then AC_MSG_ERROR([Failed to locate [$1]. Stopping.]) fi fi ]) # FIND_AND_COPY_UNLESS_LOCAL([FILE][,DEST]) # # If FILE does not exist in dir DEST look for it in the immediate # parent directories and if found do nothing. Else try to find # it in a standard system location and then copy it over. # Note that we dereference symlinks here, which is probably what # you want if you're using this macro.. maybe. # If DEST is not supplied explicitly, it will default to $srcdir, # if DEST is supplied and does not exist, it will be created too. # --------------------------------------------------------------- AC_DEFUN([FIND_AND_COPY_UNLESS_LOCAL], [dnl if test -n "[$2]"; then _filedest=[$2] mkdir -p $_filedest else _filedest=$srcdir fi if test ! -e "$_filedest/[$1]" ; then AC_MSG_CHECKING([for $_filedest/[$1]]) if ( if ( cd $_filedest for d in ".." "../.." ; do if test -r "$d/[$1]" ; then AC_MSG_RESULT([leeching from $d/[$1].]) exit 1 fi done ) ; then for d in "/usr/share/misc" "/usr/share/automake" "/usr/share/libtool" \ "/usr/share/automake-1.6" "/usr/share/automake-1.7" ; do if test -r "$d/[$1]" ; then cp -aL "$d/[$1]" "$_filedest/[$1]" AC_MSG_RESULT([copying from $d/[$1].]) exit 1 fi done else exit 1 fi ) ; then AC_MSG_ERROR([Failed to locate [$1]. Stopping.]) fi fi ]) # ACM_CONFIG_MAKEFILE(MAKEUP_GMAKE_DIR,[GLOBAL_VARIABLES]) # # This macro instantiates a forwarding Makefile in the build directory # and its corresponding Makefile.acsubst. It is also used to define # global variables that need to be available in config.status. # -------------------------------------------------------------------- AC_DEFUN([ACM_CONFIG_MAKEFILE], [dnl dnl This 'before' is not strictly required, but since this macro dnl will usually define globals needed before ACM_CONFIG_HEADER dnl then it seems like a reasonable sanity check. Feel free to dnl remove or work around it if it is causing real problems. AC_BEFORE([$0],[ACM_CONFIG_HEADER])dnl m4_if([$#],0,[AC_MSG_ERROR([[ACM_CONFIG_MAKEFILE] must have at least one parameter])]) AC_CONFIG_COMMANDS([Makefile], [ cat > Makefile < # # This file is distributed under the terms of the GNU GPL version 2. # # As a special exception to the GPL, it may be distributed without # modification as a part of a program using a makeup generated build # system, under the same distribution terms as the program itself. 
include Makefile.acsubst include \$(MAKEUP_TOP_CONFIG) ifneq (\$(strip \$(MAKEUP_VERBOSE)),) include \$(MAKEUP_GMAKE_DIR)/makefile.makeup else -include \$(MAKEUP_GMAKE_DIR)/makefile.makeup endif EOF ],[ [$2] ]) AC_CONFIG_FILES([Makefile.acsubst:$1/makefile.acsubst])dnl ]) # ACM_DEFINE_PUBLIC(VARIABLE,[VALUE],[DESCRIPTION]) # # Causes VARIABLE to be defined in a public config header, such as: # # /* DESCRIPTION */ # #ifndef VARIABLE # #define VARIABLE VALUE # #endif # # If VALUE is unspecified then VARIABLE will be defined to be empty. # This macro should be used in conjunction with ACM_CONFIG_HEADER # which defines the name of the config headers in which to output # variables defined with this macro. # ------------------------------------------------------------------ AC_DEFUN([ACM_DEFINE_PUBLIC], [dnl AC_BEFORE([$0],[ACM_CONFIG_HEADER])dnl m4_if(m4_bregexp([$1],[^[A-Za-z_]+[A-Za-z0-9_]+$]),-1, [AC_MSG_ERROR([Bad variable name '[$1]' supplied to [ACM_DEFINE_PUBLIC]])]) acm_public_macros="$acm_public_macros [$1]" if test -z "$acm_public_macros_def"; then acm_public_macros_def="acm_public_macro_[$1]=\"[$2]\"; acm_public_macro_desc_[$1]=\"[$3]\"" else acm_public_macros_def="$acm_public_macros_def; acm_public_macro_[$1]=\"[$2]\"; acm_public_macro_desc_[$1]=\"[$3]\"" fi ]) # ACM_DEFINE_PUBLIC_STRING(VARIABLE,[VALUE],[DESCRIPTION]) # # Causes VARIABLE to be defined as a literal string # in a public config header, such as: # # /* DESCRIPTION */ # #ifndef VARIABLE # #define VARIABLE "VALUE" # #endif # # If VALUE is unspecified then VARIABLE will be defined to be empty. # This macro should be used in conjunction with ACM_CONFIG_HEADER # which defines the name of the config headers in which to output # variables defined with this macro. # ------------------------------------------------------------------ AC_DEFUN([ACM_DEFINE_PUBLIC_STRING], [dnl AC_BEFORE([$0],[ACM_CONFIG_HEADER])dnl m4_if(m4_bregexp([$1],[^[A-Za-z_]+[A-Za-z0-9_]+$]),-1, [AC_MSG_ERROR([Bad variable name '[$1]' supplied to [ACM_DEFINE_PUBLIC_STRING]])]) acm_public_strings="$acm_public_strings [$1]" if test -z "$acm_public_strings_def"; then acm_public_strings_def="acm_public_string_[$1]=\"[$2]\"; acm_public_string_desc_[$1]=\"[$3]\"" else acm_public_strings_def="$acm_public_strings_def; acm_public_string_[$1]=\"[$2]\"; acm_public_string_desc_[$1]=\"[$3]\"" fi ]) # ACM_CONFIG_HEADER(NAME) # # This is an instantiating macro that should usually be included # shortly before AC_OUTPUT. It will create a config file containing # the public symbols declared by ACM_DEFINE_PUBLIC. # ----------------------------------------------------------------- AC_DEFUN([ACM_CONFIG_HEADER], [dnl AC_REQUIRE([ACM_REQUIRE_LN_S])dnl m4_if([$#],1,[],[AC_MSG_ERROR([[ACM_CONFIG_HEADER] must have only one parameter])]) dnl is there a way to disable this at m4 time if ACM_DEFINE_PUBLIC has never been used? AC_CONFIG_COMMANDS([include/$1], [ _SUBDIR="$(dirname [$1])/" if test "$_SUBDIR" = "./"; then _SUBDIR= fi _TEMPFILE="include/.tempfile" _GUARD="_MAKEFILE_PLATFORM_$(echo $package_name | tr "a-z .-" "A-Z___")_CONF_H" cat > $_TEMPFILE < * * This file is distributed under the terms of the GNU GPL version 2. * * As a special exception to the GPL, it may be distributed without * modification as a part of a program using a makeup generated build * system, under the same distribution terms as the program itself.
*/ #ifndef ${_GUARD} #define ${_GUARD} // Guard for POSIX dependent code #if defined(__unix__) || defined(__unix) || (defined(__APPLE__) && defined(__MACH__)) #if (EM_PLATFORM_POSIX != 1) #define EM_PLATFORM_POSIX 1 #endif // Guard for Linux kernel dependent code #if defined(__linux__) #if linux == 1 #define SAVE_linux #undef linux #elif defined(linux) #warning Macro 'linux' is defined to a value other than 1 #endif #if (EM_PLATFORM_LINUX != 1) #define EM_PLATFORM_LINUX 1 #endif #define EM_PLATFORM__ linux #else // Guard for BSD dependent code #include #if defined(BSD) || defined(__FreeBSD_kernel__) #if (EM_PLATFORM_BSD != 1) #define EM_PLATFORM_BSD 1 #endif #define EM_PLATFORM__ bsd #endif // Guard for MacOSX dependent code #if defined(__APPLE__) && defined(__MACH__) && (EM_PLATFORM_MAC != 1) #define EM_PLATFORM_MAC 1 #endif #endif #endif // Guard for Windows dependent code #if defined(_WIN32) #if (EM_PLATFORM_MSW != 1) #define EM_PLATFORM_MSW 1 #endif #define EM_PLATFORM__ msw #endif #ifndef EM_PLATFORM__ #error Platform unrecognised. #endif // Feature override macro. // // You may define the value of this macro to specify a configuration // other than the system default. 'd' will attempt to use a debug // build, 'r' a release build. Other flavour options may be defined // by individual packages in their own configuration. #ifndef EM_CONFIG_FLAVOUR #define EM_CONFIG_FLAVOUR #endif #define EM_CAT(a,b) EM_CAT_(a,b) #define EM_CAT_(a,b) a ## b #define EM_CONFIG_HEADER <${__package_config_dir}EM_CAT(EM_PLATFORM__,EM_CONFIG_FLAVOUR)_${__package_config_public}> #include EM_CONFIG_HEADER #ifndef _${_GUARD} #error Config header cannot be located #endif #undef EM_CAT #undef EM_CAT_ #undef EM_PLATFORM__ #undef EM_CONFIG_HEADER #ifdef SAVE_linux #define linux 1 #undef SAVE_linux #endif // Compiler version tests. // // This macro will return false if the version of gcc in use // is earlier than the specified major, minor limit, or if gcc // is not being used. Otherwise it will evaluate to be true. // This will also be true for the clang compiler, for whatever // GCC version it is pretending to be compatible with. #if defined(__GNUC__) && defined(__GNUC_MINOR__) #define EM_COMPILER_GCC( major, minor ) ( ( __GNUC__ > (major) ) \\ || ( __GNUC__ == (major) && __GNUC_MINOR__ >= (minor) ) ) #else #define EM_COMPILER_GCC( major, minor ) 0 #endif // As above, except for the clang compiler instead. 
#if defined(__clang_major__) && defined(__clang_minor__) #define EM_COMPILER_CLANG( major, minor ) ( ( __clang_major__ > (major) ) \\ || ( __clang_major__ == (major) && __clang_minor__ >= (minor) ) ) #else #define EM_COMPILER_CLANG( major, minor ) 0 #endif #endif // ${_GUARD} EOF if diff --brief include/[$1] $_TEMPFILE > /dev/null 2>&1; then AC_MSG_NOTICE([[$1] is unchanged]) rm $_TEMPFILE else mv $_TEMPFILE include/[$1] fi echo "/* Makeup generated $_SUBDIR$config_flavour */" > $_TEMPFILE echo >> $_TEMPFILE echo "#ifndef _${_GUARD}" >> $_TEMPFILE echo "#define _${_GUARD}" >> $_TEMPFILE echo >> $_TEMPFILE for m in $acm_public_macros; do eval echo "/\* \$acm_public_macro_desc_$m \*/" >> $_TEMPFILE echo "#ifndef $m" >> $_TEMPFILE eval echo "\#define $m \$acm_public_macro_$m" >> $_TEMPFILE echo "#endif" >> $_TEMPFILE echo >> $_TEMPFILE done for s in $acm_public_strings; do eval echo "/\* \$acm_public_string_desc_$s \*/" >> $_TEMPFILE echo "#ifndef $s" >> $_TEMPFILE eval echo "\#define $s \\\"\$acm_public_string_$s\\\"" >> $_TEMPFILE echo "#endif" >> $_TEMPFILE echo >> $_TEMPFILE done echo "#endif // _${_GUARD}" >> $_TEMPFILE if diff --brief include/$_SUBDIR$config_flavour $_TEMPFILE > /dev/null 2>&1; then AC_MSG_NOTICE([$_SUBDIR$config_flavour is unchanged]) rm $_TEMPFILE else mv $_TEMPFILE include/$_SUBDIR$config_flavour fi ( cd include/$_SUBDIR if test ! -e $config_platform; then $LN_S $config_flavour $config_platform fi ) ],[ acm_public_macros="$acm_public_macros" $acm_public_macros_def acm_public_strings="$acm_public_strings" $acm_public_strings_def LN_S="$LN_S" config_platform="$MAKEUP_PLATFORM_HEADER" config_flavour="$MAKEUP_FLAVOUR_HEADER" ]) ]) # ACM_CPP_PUSH_POP_DIAGNOSTIC_MACROS # # Define a set of CPP macros for local tweaking of 'GCC diagnostic' settings # around Special Snowflake code. Mostly these are used for suppressing some # incorrect diagnostic, but they can also be used to locally add additional # diagnostics around code that might require that. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_CPP_PUSH_POP_DIAGNOSTIC_MACROS], [dnl AC_MSG_NOTICE([Including EM_PUSH/POP_DIAGNOSTIC preprocessor macros ...]) AH_VERBATIM([DIAGNOSTIC_PUSH_POP], [ /* Safely stringify a pragma option (which may already include quotes) */ #define EM_PRAGMA(p) _Pragma (#p) /* Save the current diagnostic settings, and try to add or modify some diagnostic Option to have the given Action. Action may be one of: error, warning, or ignored, setting the new disposition of Option. If Option is not recognised by the compiler this request will be silently ignored. Clang-6 stopped using -Wunknown-pragmas (which was implied by -Wpragmas) for these, so we need to silence another option for it (which in turn still needs -Wpragmas, as GCC of course doesn't support clang's new -Wunknown-warning-option. Cooperation is hard ... */ #define EM_TRY_PUSH_DIAGNOSTIC( Action, Option ) \ EM_PRAGMA(GCC diagnostic push) \ EM_PRAGMA(GCC diagnostic ignored "-Wpragmas") \ EM_PRAGMA(GCC diagnostic ignored "-Wunknown-warning-option") \ EM_PRAGMA(GCC diagnostic Action Option) /* Save the current diagnostic settings, and try to add or modify some diagnostic Option to have the given Action. Action may be one of: error, warning, or ignored, setting the new disposition of Option. If Option is not recognised by the compiler this request may itself generate a compile time diagnostic warning or error depending on the compiler defaults and command line options used. 
*/ #define EM_PUSH_DIAGNOSTIC( Action, Option ) \ EM_PRAGMA(GCC diagnostic push) \ EM_PRAGMA(GCC diagnostic Action Option) /* Set the Action for additional diagnostic Options. The current settings are not pushed, so a call to EM_POP_DIAGNOSTIC will revert all changes made since the last time EM_PUSH_DIAGNOSTIC was used. */ #define EM_MORE_DIAGNOSTIC( Action, Option ) \ EM_PRAGMA(GCC diagnostic Action Option) ] m4_foreach([action],[[IGNORE,ignored],[WARN,warning],[ERROR,error]], [ /* Equivalent to EM_TRY_PUSH_DIAGNOSTIC( m4_shift(action), Option ) */ @%:@define EM_TRY_PUSH_DIAGNOSTIC_[]m4_car(action)( Option ) \ EM_TRY_PUSH_DIAGNOSTIC( m4_shift(action), Option ) /* Equivalent to EM_PUSH_DIAGNOSTIC( m4_shift(action), Option ) */ @%:@define EM_PUSH_DIAGNOSTIC_[]m4_car(action)( Option ) \ EM_PUSH_DIAGNOSTIC( m4_shift(action), Option ) /* Equivalent to EM_MORE_DIAGNOSTIC( m4_shift(action), Option ) */ @%:@define EM_MORE_DIAGNOSTIC_[]m4_car(action)( Option ) \ EM_MORE_DIAGNOSTIC( m4_shift(action), Option ) ])dnl [ /* Restore the diagnostic state to what it was before the last time it was pushed. If there is no corresponding push the command-line options are restored. */ #define EM_POP_DIAGNOSTIC \ EM_PRAGMA(GCC diagnostic pop) ])dnl ]) # __ACM_ADD_COMPILER_OPTION([FLAGS_PREFIX],[OPTION]) # # Implementation of _ACM_ADD_COMPILER_OPTION for doing the individual tests of # each of the specified OPTIONS. The correct default language should already be # set, so here we test if the OPTION is supported, caching the result of that # test in AS_TR_SH'ified mu_cv_${FLAGS_PREFIX}_flag_${OPTION}, and appending # any supported options to ${FLAGS_PREFIX}FLAGS. # ------------------------------------------------------------------------------ AC_DEFUN([__ACM_ADD_COMPILER_OPTION], [dnl ACM_PUSH_VAL([$0],[$1FLAGS],[$2])dnl AS_VAR_PUSHDEF([cachevar],[mu_cv_$1_flag_$2])dnl dnl We need to special case C => $CC here, but CXX => $CXX can be implicit. AC_CACHE_CHECK([if m4_case([$1],[C],[$CC],[$$1]) supports $2],[cachevar], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]],[[]])], [AS_VAR_SET([cachevar],[yes])], [AS_VAR_SET([cachevar],[no])] ) ]) AS_VAR_IF([cachevar],[yes],[],[ACM_POP_VAR([$0],[$1FLAGS])])dnl AS_VAR_POPDEF([cachevar])dnl ]) # _ACM_ADD_COMPILER_OPTION([FLAGS_PREFIX],[OPTIONS]) # # Implementation of ACM_ADD_COMPILER_OPTION for doing the individual tests with # each of the specified FLAGS_PREFIXES. This will temporarily switch the default # language based on FLAGS_PREFIX, then test if each of the comma-separated list # of OPTIONS is supported for that language. # ------------------------------------------------------------------------------ AC_DEFUN([_ACM_ADD_COMPILER_OPTION], [dnl ACM_PUSH_LANG_FOR_FLAGS([$0],[$1]) ACM_FOREACH([compiler_opt],[$2],[_$0([$1],m4_dquote(m4_expand([compiler_opt])))]) ACM_POP_LANG_FOR_FLAGS([$0],[$1]) ]) # ACM_ADD_COMPILER_OPTION([FLAGS_PREFIXES],[OPTIONS]) # # For each combination of the comma-separated FLAGS_PREFIXES and OPTIONS check # if the corresponding compiler supports that option, and if it does add it to # the *FLAGS for that language for use when compiling subsequent source. # # Currently supported values for FLAGS_PREFIXES are C and CXX (use [C,CXX] to # test and set options for both the C and C++ compiler). The FLAGS_PREFIXES # must be a literal string - but the OPTIONS may be either literals, a shell # expression which expands to a comma-separated list, or a mixture of both. 
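#
# For example (an illustrative invocation only; the options shown here are
# assumptions for the sketch, not defaults set by this build system):
#
#    ACM_ADD_COMPILER_OPTION([C,CXX],[-pipe,-fvisibility=hidden])
#
# Will test each of -pipe and -fvisibility=hidden with both $CC and $CXX,
# appending an option to CFLAGS and/or CXXFLAGS only where the respective
# compiler accepts it, with each result cached in the AS_TR_SH'ified
# mu_cv_C_flag_* or mu_cv_CXX_flag_* variables described above.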
# ---------------------------------------------------------------------------- AC_DEFUN([ACM_ADD_COMPILER_OPTION], [dnl dnl We can't use ACM_FOREACH here, because AC_LANG_PUSH only takes literals. m4_foreach([lang],[$1],[_$0(lang,[$2])]) ]) # _ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION([FLAGS_PREFIX],[COMPILER_VAR]) # # Check if the compiler considers unknown warning options to be an error # by default, or if it needs an explicit extra option passed to do so. # This macro is an implementation detail required by ACM_ADD_COMPILER_WARNING. # # Testing whether a warning option is supported can be tricky. By default # GCC will consider -Wfoo to be an error if 'foo' is an unknown warning, # but it will not even emit a diagnostic for -Wno-foo unless some other # diagnostic message is also triggered, in which case it will merely warn # that an unrecognised option is also present. # # With the clang toolchain the behaviour in both cases is controlled by an # explicit option: -Wunknown-warning-option, which is enabled by default. # If that option is negated then no diagnostic is output, otherwise unknown # warning options of either polarity will simply emit a warning. If we want # to test whether a warning option is supported then we need to explicitly # add unknown-warning-option to the -Werror set to provoke a test failure # if it is not. # # The FLAGS_PREFIX determines which language will be tested and so which of # the *FLAGS variables the test options will be added to. Supported values # are currently C and CXX. # # The COMPILER_VAR is only used to report the toolchain being tested, so for # C it should be [$CC] and for C++ it should be [$CXX]. This parameter is # passed as a convenience, since there is no strictly consistent rule which # maps all the related identifiers for a language together, and the caller # should already know it, so we don't need extra logic here to look it up. # # The output variable ACM_${FLAGS_PREFIX}_WARNINGFAIL will be set to either # an empty string or the additional option(s) which need to be set in the # relevant *FLAGS when testing whether some warning option is supported. # # This macro shouldn't normally be invoked directly, instead the language # specific wrappers which don't need options (but whose name can still be # constructed from other macros) should be AC_REQUIRE'd before the output # variable is needed for the first time. # ---------------------------------------------------------------------------- AC_DEFUN([_ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION], [dnl ACM_PUSH_LANG_FOR_FLAGS([$0],[$1])dnl ACM_PUSH_VAL([$0],[$1FLAGS],[-Womg-wtf-not-an-option])dnl ACM_$1_WARNINGFAIL="" AC_CACHE_CHECK([if $2 unknown warning options are errors],[mu_cv_$1_flag_uwo], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]],[[]])], [mu_cv_$1_flag_uwo=no], [mu_cv_$1_flag_uwo=yes] ) ]) AS_IF([test "$mu_cv_$1_flag_uwo" = no],[ ACM_REPUSH_VAL([$0],[$1FLAGS],[-Werror=unknown-warning-option,-Womg-wtf-not-an-option]) AC_CACHE_CHECK([if $2 supports -Werror=unknown-warning-option],[mu_cv_$1_flag_werror_uwo], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]],[[]])], [mu_cv_$1_flag_werror_uwo=no], [mu_cv_$1_flag_werror_uwo=yes] ) ]) dnl It should be safe to fail open here. If we haven't figured out how to make the dnl compiler fail when passed an unknown warning option, then it should be relatively dnl safe to let tests default to passing them anyway. 
At best, they will actually dnl work as intended, and at worst it might make a lot of noise spitting out non-fatal dnl warning diagnostics about not liking them - but it shouldn't break the build. dnl We bark a warning here so that this test can be improved further if that occurs, dnl and err on the side of including rather than excluding extra warnings. AS_IF([test "$mu_cv_$1_flag_werror_uwo" = yes], [ACM_$1_WARNINGFAIL="-Werror=unknown-warning-option"], [AC_MSG_WARN([Don't know how to make $2 fail with unknown warning options,]) AC_MSG_WARN([so later tests may (wrongly) decide to pass them to it anyway.])]) ]) ACM_POP_VAR([$0],[$1FLAGS])dnl ACM_POP_LANG_FOR_FLAGS([$0],[$1])dnl ]) # _ACM_C_WERROR_UNKNOWN_WARNING_OPTION # # C language wrapper for _ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION # which can be AC_REQUIRE'd. # ------------------------------------------------------------------- AC_DEFUN([_ACM_C_WERROR_UNKNOWN_WARNING_OPTION], [dnl _ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION([C],[$CC]) ]) # _ACM_CXX_WERROR_UNKNOWN_WARNING_OPTION # # C++ language wrapper for _ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION # which can be AC_REQUIRE'd. # --------------------------------------------------------------------- AC_DEFUN([_ACM_CXX_WERROR_UNKNOWN_WARNING_OPTION], [dnl _ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION([CXX],[$CXX]) ]) # __ACM_ADD_COMPILER_WARNING([FLAGS_PREFIX],[WARNING_OPTION]) # # Implementation of _ACM_ADD_COMPILER_WARNING for doing the individual tests of # each of the specified WARNING_OPTIONS. The correct default language should be # already set, so here we test if -W${WARNING_OPTION} is supported, caching the # result of that test in mu_cv_${FLAGS_PREFIX}_flag_${CACHE_VAR_SUFFIX}, and # appending any supported warning options to ${FLAGS_PREFIX}FLAGS. # ------------------------------------------------------------------------------ AC_DEFUN([__ACM_ADD_COMPILER_WARNING], [dnl ACM_PUSH_VAL([$0],[$1FLAGS],[$ACM_$1_WARNINGFAIL])dnl __ACM_ADD_COMPILER_OPTION([$1],[-W$2]) AS_VAR_PUSHDEF([cachevar],[mu_cv_$1_flag_-W$2])dnl AS_VAR_IF([cachevar],[yes],[ACM_REPUSH_VAL([$0],[$1FLAGS],[-W$2])], [ACM_POP_VAR([$0],[$1FLAGS])]) AS_VAR_POPDEF([cachevar])dnl ]) # _ACM_ADD_COMPILER_WARNING([FLAGS_PREFIX],[WARNING_OPTIONS]) # # Implementation of ACM_ADD_COMPILER_WARNING for doing the individual tests with # each of the specified FLAGS_PREFIXES. This will temporarily switch the default # language based on FLAGS_PREFIX, then test if each of the comma-separated list # of WARNING_OPTIONS is supported for that language. # ------------------------------------------------------------------------------ AC_DEFUN([_ACM_ADD_COMPILER_WARNING], [ dnl We do this check before the AC_REQUIRE below, because the most likely cause dnl of this failing is a typo in user code invoking ACM_ADD_COMPILER_WARNING dnl and this gives a more user friendly warning pointing to the correct place dnl when the m4 is being processed by aclocal/autom4te, rather than having that dnl propagate deeper into the implementation detail before being caught. dnl dnl But it will not change the default toolchain for the invocation of the dnl requirement, since that gets expanded outside of the scope of the push/pop dnl used here, so it will need to do this itself as well to be run with the dnl correct toolchain. 
ACM_PUSH_LANG_FOR_FLAGS([$0],[$1])dnl AC_REQUIRE([_ACM_$1_WERROR_UNKNOWN_WARNING_OPTION])dnl ACM_FOREACH([warning_opt],[$2],[_$0([$1],m4_dquote(m4_expand([warning_opt])))]) ACM_POP_LANG_FOR_FLAGS([$0],[$1])dnl ]) # ACM_ADD_COMPILER_WARNING([FLAGS_PREFIXES],[WARNING_OPTIONS]) # # This is a specialisation of ACM_ADD_COMPILER_OPTION which handles the extra # hoops we need to jump through to test whether particular warning options are # actually supported or not (as opposed to ignoring unknown warning options). # # For each combination of the comma-separated FLAGS_PREFIXES and WARNING_OPTIONS # check if the corresponding compiler supports -W${WARNING_OPTION} and if it does # add it to the *FLAGS for that language for use when compiling subsequent source. # # Currently supported values for FLAGS_PREFIXES are C and CXX (use [C,CXX] to # test and set options for both the C and C++ compiler). The FLAGS_PREFIXES # must be a literal string - but the OPTIONS may be either literals, a shell # expression which expands to a comma-separated list, or a mixture of both. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_ADD_COMPILER_WARNING], [dnl dnl We can't use ACM_FOREACH here, because AC_LANG_PUSH only takes literals. m4_foreach([lang],[$1],[_$0(lang,[$2])]) ]) # __ACM_ADD_COMPILER_WARNING_QUIETLY([FLAGS_PREFIX],[WARNING_OPTION]) # # Implementation of ACM_ADD_COMPILER_WARNING_QUIETLY which does the real work # of checking the cache var for some FLAGS_PREFIX and WARNING_OPTION pair, and # delegating to ACM_ADD_COMPILER_WARNING if it is not already set. # ---------------------------------------------------------------------------- AC_DEFUN([__ACM_ADD_COMPILER_WARNING_QUIETLY], [dnl dnl There is no AS_VAR_CASE so it's nested IF instead of AS_CASEing a temp var. AS_VAR_PUSHDEF([cachevar],[mu_cv_$1_flag_-W$2])dnl AS_VAR_IF([cachevar],[yes],[ACM_ADD_OPT([$1FLAGS],[-W$2])], [AS_VAR_IF([cachevar],[no],[], [ACM_ADD_COMPILER_WARNING([$1],[$2])])])dnl AS_VAR_POPDEF([cachevar]) ]) # _ACM_ADD_COMPILER_WARNING_QUIETLY([FLAGS_PREFIX],[WARNING_OPTIONS]) # # Implementation of ACM_ADD_COMPILER_WARNING_QUIETLY for doing the individual # tests with each of the specified WARNING_OPTIONS for a given FLAGS_PREFIX. # ---------------------------------------------------------------------------- AC_DEFUN([_ACM_ADD_COMPILER_WARNING_QUIETLY], [dnl ACM_FOREACH([warning_opt],[$2],[_$0([$1],m4_dquote(m4_expand([warning_opt])))])dnl ]) # ACM_ADD_COMPILER_WARNING_QUIETLY([WARNING_OPTIONS],[FLAGS_PREFIXES]) # # This is a specialisation of ACM_ADD_COMPILER_WARNING which short circuits # the normal AC_CACHE_CHECK to first test the cache variable directly. There # are some tests (such as for attribute support) where we need to temporarily # add warning (and -Werror=) options to get a correct result about whether # what we are testing is supported or not, and sometimes the warning options # that we need to add are not universally supported and also need to be tested # themselves before being used. # # This macro avoids littering the configure output with repeated reports about # 'checking if $COMPILER supports $WARNING_OPTION... (cached)' # for each real option test that we want to perform. It will only output the # check for the supplementary WARNING_OPTIONS the first time it is performed, # and after that it will just silently add (or not) the WARNING_OPTIONS to the # desired set of FLAGS variables. 
# # If the cache variable for an option is not already set to 'yes' or 'no', # this macro behaves the same as a direct call to ACM_ADD_COMPILER_WARNING. # # If FLAGS_PREFIXES are not specified explicitly, it will default to using the # normal compiler {C,CXX,etc}FLAGS for the currently AC_LANG_PUSH'ed language. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_ADD_COMPILER_WARNING_QUIETLY], [dnl dnl We can't use ACM_FOREACH here, because AC_LANG_PUSH only takes literals. m4_foreach([lang],m4_default_quoted([$2],[ACM_LANG_PREFIX]),[_$0(lang,[$1])])dnl ]) # _ACM_ADD_LINKER_OPTION([OPTION]) # # Implementation of ACM_ADD_LINKER_OPTION for doing the individual tests for # each OPTION. The result is cached in AS_TR_SH'ified mu_cv_ldflag_${OPTION} # and supported options are appended to LDFLAGS. # ---------------------------------------------------------------------------- AC_DEFUN([_ACM_ADD_LINKER_OPTION], [ ACM_PUSH_VAL([$0],[LDFLAGS],[$1])dnl AS_VAR_PUSHDEF([cachevar],[mu_cv_ldflag_$1])dnl AC_CACHE_CHECK([if linker supports $1],[cachevar], [AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])], [AS_VAR_SET([cachevar],[yes])], [AS_VAR_SET([cachevar],[no])] ) ]) AS_VAR_IF([cachevar],[yes],[],[ACM_POP_VAR([$0],[LDFLAGS])])dnl AS_VAR_POPDEF([cachevar])dnl ]) # ACM_ADD_LINKER_OPTION([OPTIONS]) # # For each of the comma-separated OPTIONS, check if the linker supports that # option (using the current AC_LANG toolchain) and if it does then add it to # LDFLAGS for use when compiling subsequent source. The OPTIONS may be either # literals, a shell expression which expands to a comma-separated list, or a # mixture of both. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_ADD_LINKER_OPTION], [dnl ACM_FOREACH([linker_opt],[$1],[_$0(m4_dquote(m4_expand([linker_opt])))]) ]) # ACM_ADD_COMPILE_LINK_OPTION([FLAGS_PREFIXES],[OPTIONS]) # # For each combination of the comma-separated FLAGS_PREFIXES and OPTIONS check # if the corresponding compiler and linker supports that option. If so, then # it will be added to both ${FLAGS_PREFIX}FLAGS and LDFLAGS. The result of # compiler tests will be cached in mu_cv_${FLAGS_PREFIX}_flag_${OPTION}, and # the linker test, if performed, in mu_cv_ldflag_${OPTION}, converted to valid # shell variable names by AS_TR_SH. The linker test will only be performed # using the toolchain of the first of the FLAGS_PREFIXES, and only if the test # with that compiler succeeded. # # This macro is mostly useful for the case where some option needs to be passed # to both the compiler and linker to operate correctly. # # Currently supported values for FLAGS_PREFIXES are C and CXX (use [C,CXX] to # test and set options for both the C and C++ compiler). The FLAGS_PREFIXES # must be a literal string - but the OPTIONS may be either literals, a shell # expression which expands to a comma-separated list, or a mixture of both. 
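#
# For example (an illustrative invocation only, with an option chosen here
# purely as an assumption for the sketch):
#
#    ACM_ADD_COMPILE_LINK_OPTION([C,CXX],[-flto])
#
# Will test -flto with both compilers and add it to CFLAGS and CXXFLAGS where
# it is supported.  It is then also added to LDFLAGS if a link test with the
# C toolchain (the first prefix listed) succeeds, and dropped again from all
# three variables if that link test fails.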
# ---------------------------------------------------------------------------- AC_DEFUN([ACM_ADD_COMPILE_LINK_OPTION], [dnl m4_pushdef([flagvars],m4_combine([,],[$1],[],[FLAGS]))dnl ACM_FOREACH([compile_link_opt],[$2], [ACM_PUSH_VAR([$0],[flagvars])dnl ACM_ADD_COMPILER_OPTION([$1],[compile_link_opt])dnl AS_VAR_PUSHDEF([compvar],[mu_cv_[]m4_car($1)[]_flag_[]compile_link_opt])dnl AS_VAR_IF([compvar],[yes], [ACM_PUSH_LANG_FOR_FLAGS([$0],m4_car($1))dnl _ACM_ADD_LINKER_OPTION(m4_dquote(m4_expand([compile_link_opt]))) AS_VAR_PUSHDEF([linkvar],[mu_cv_ldflag_[]compile_link_opt])dnl AS_VAR_IF([linkvar],[no],[ACM_POP_VAR([$0],[flagvars])]) AS_VAR_POPDEF([linkvar]) ACM_POP_LANG_FOR_FLAGS([$0],m4_car($1))dnl ], [ACM_POP_VAR([$0],[flagvars])]dnl ) AS_VAR_POPDEF([compvar])dnl ]) m4_popdef([flagvars])dnl ]) # _ACM_ADD_SANITIZER([SANITIZER]) # # Implementation of ACM_ADD_SANITIZER for doing the individual tests of each # requested SANITIZER. It will test both the C and C++ toolchains and update # LDFLAGS if it can be enabled. Right now, that does assume that if it works # for one it will work for the other too, because we don't have separate C or # C++ LDFLAGS for projects which use both. # # This doesn't handle the case of the thread sanitiser with older toolchains, # which require explicit additional options to build the position independent # executables it needs to work. With newer compilers, enabling it should do # that automatically, if they don't already default to using PIE anyway. We # can't easily add those options here, because we don't have option variables # that are specific to libraries or executables, and we need to use different # options for this depending on which we are building. If we ever really do # need this with older toolchains we can look at splitting those further too. # ---------------------------------------------------------------------------- AC_DEFUN([_ACM_ADD_SANITIZER], [ ACM_ADD_COMPILER_OPTION([C,CXX],[-fsanitize=$1]) dnl If we added it to C/CXXFLAGS, we need to add it to LDFLAGS too. dnl And we special case for the sanitisers where we know they need, dnl or would benefit from, some additional compiler options. AS_VAR_PUSHDEF([cachevar],[mu_cv_C_flag_-fsanitize=$1])dnl AS_VAR_IF([cachevar],[yes],[ ACM_ADD_OPT([LDFLAGS],[-fsanitize=$1]) AS_CASE([$1], [address|memory|undefined], [ACM_ADD_UNIQUE([CFLAGS,CXXFLAGS],[-fno-omit-frame-pointer])] )dnl ])dnl AS_VAR_POPDEF([cachevar])dnl ]) # ACM_ADD_SANITIZER([SANITIZERS]) # # Test which of the comma-separated list of SANITIZERS are supported, and add # the necessary compile and link time options to enable them. You should list # them in order of preference, as some of them may preclude the use of others. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_ADD_SANITIZER], [ ACM_FOREACH([san_type],[$1],[_$0(san_type)]) ]) # ACM_SUPPRESS_LSAN([ID]) # # There are some autoconf tests which leak memory when run, and so will fail # if the LeakSanitiser is active (the AM_ICONV and AC_FUNC_MMAP macros are # known offenders, and there are probably more). This macro can be used to # temporarily disable the LSan checking while running those tests. The ID is # a local identifier to use when saving the previous state, so that calls to # this may be nested if needed. It should only contain characters that are # safe to use in a shell variable name. To restore the original state again # use ACM_RESTORE_LSAN with the same ID as was passed here. 
If nested, the # IDs must be restored in the reverse order that they were suppressed in. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_SUPPRESS_LSAN], [dnl AS_VAR_PUSHDEF([asan],[mu_cv_C_flag_-fsanitize=address])dnl AS_VAR_PUSHDEF([lsan],[mu_cv_C_flag_-fsanitize=leak])dnl mu_lsan_enabled=no AS_VAR_IF([asan],[yes],[mu_lsan_enabled=yes]) AS_VAR_IF([lsan],[yes],[mu_lsan_enabled=yes]) AS_IF([test "$mu_lsan_enabled" = yes],[ AS_VAR_SET_IF([mu_suppress_lsan_$1], [AC_MSG_ERROR([LSan suppression for '$1' is already active])]) AC_MSG_NOTICE([Disabling LSan for $1 ...]) AS_IF([test -n "$ASAN_OPTIONS"],[ AS_VAR_SET([mu_suppress_lsan_$1],[$ASAN_OPTIONS]) export ASAN_OPTIONS="$ASAN_OPTIONS:detect_leaks=0" ],[ AS_VAR_SET([mu_suppress_lsan_$1],[yes]) export ASAN_OPTIONS="detect_leaks=0" ]) ]) AS_VAR_POPDEF([lsan])dnl AS_VAR_POPDEF([asan])dnl ]) # ACM_DEFINE_HAVE_SANITIZERS # # Provides EM_HAVE_ASAN and EM_HAVE_TSAN when those sanitisers are enabled. # We probably could run configure time checks for this, but in theory it's # easy enough to do this at compile time, so just squirt out the relevant # macro tests to the config header. This version will still work if the # user just runs make with a different environment rather than reconfiguring # with --enable-san or similar. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_DEFINE_HAVE_SANITIZERS], [ AC_MSG_NOTICE([Including EM_HAVE_ASAN/TSAN preprocessor macros ...]) AH_VERBATIM([EM_HAVE_ASAN_TSAN], [ #ifdef __has_feature #if __has_feature(address_sanitizer) #define EM_HAVE_ASAN 1 #endif #elif defined(__SANITIZE_ADDRESS__) /* This macro is defined if building with the -fsanitize=address option. */ #define EM_HAVE_ASAN 1 #endif #ifdef __has_feature #if __has_feature(thread_sanitizer) #define EM_HAVE_TSAN 1 #endif #elif defined(__SANITIZE_THREAD__) /* This macro is defined if building with the -fsanitize=thread option. */ #define EM_HAVE_TSAN 1 #endif ])dnl ]) # ACM_RESTORE_LSAN([ID]) # # Restore the LSan state which existed prior to ACM_SUPPRESS_LSAN([ID]) being # called. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_RESTORE_LSAN], [dnl AS_VAR_SET_IF([mu_suppress_lsan_$1],[ AC_MSG_NOTICE([Restoring LSan after $1 ...]) AS_UNSET([ASAN_OPTIONS]) AS_VAR_IF([mu_suppress_lsan_$1],[yes],[], [AS_VAR_COPY([ASAN_OPTIONS],[mu_suppress_lsan_$1]) export ASAN_OPTIONS ]) AS_UNSET([mu_suppress_lsan_$1]) ],[ AS_IF([test "$mu_lsan_enabled" = yes], [AC_MSG_ERROR([LSan restore requested, but suppression for '$1' is not active])]) ]) ]) # ACM_CHECK_ATTRIBUTE([TYPE],[ATTRIBUTE],[TEST-PROGRAM],[ACTION],[ACTION-OPTIONS...]) # # Test if the current AC_LANG compiler supports ATTRIBUTE. The attribute TYPE # (function, variable, type, etc.) is used for informative messages, variable # and CPP macro names, and by higer level macros for the selection of default # options. The ATTRIBUTE string should be the literal to use within the double # parenthesis of an attibute declaration ie. __attribute__((ATTRIBUTE)). For # attributes with arguments of their own, the arguments used in the ATTRIBUTE # may be placeholder names of the form which could be substituted by a # function-like macro in user code. # # The TEST-PROGRAM should be created by AC_LANG_PROGRAM or be of the same form # as if it was, and should make use of the ATTRIBUTE in the prologue or body # as appropriate to test compiler support for it. 
An ATTRIBUTE with arguments # must use some representative values for those arguments in the test code. # # If an ACTION macro is passed, it will be expanded with 'cachevar' holding # the (cached) compile test result, ATTR_ID holding an uppercased version of # the ATTRIBUTE string with any non-alphanumeric characters transformed to be # shell and CPP macro safe, and all of $@ passed to it verbatim as arguments. # Any further ACTION-OPTIONS arguments are simply passed through to the ACTION # macro, this macro does nothing with or to those. # # User code generally won't use this macro directly unless it has some very # special case to handle. The more specialised ACM_HAVE_* and ACM_DEFINE_* # macros for each attribute type are more probably what you want as (unlike # this macro which just tests for support) they define CPP macros that user # code can test or employ directly to use any supported attributes. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_CHECK_ATTRIBUTE], [dnl dnl helper macro for building the extra_warnings list. m4_pushdef([add_extra_warning],[m4_append([extra_warnings],]m4_dquote($[]1)[,[,])])dnl dnl dnl GCC 8.3.0 needs -Werror=attributes to make warnings about any unrecognised dnl attribute fail at compilation time. m4_pushdef([extra_warnings],[error=attributes])dnl dnl dnl But Clang 7.0.1 also needs -Werror=unknown-sanitizers to fail on unknown dnl parameters to the no_sanitize() attribute. And strictly it appears to need dnl -Wunknown-attributes for other unknowns, but that is enabled by default and dnl -Wattributes there implies -Wignored-attributes and -Wunknown-attributes. dnl dnl At the time of writing, no_sanitize* attributes are only applicable as a dnl function attribute, but that may not always stay true, and this test is dnl eliminated at macro expansion time if it's not needed, so leaving it here dnl for now seems cheap, reasonable, and a small bet on possibly future proof. m4_bmatch([$2],[no_sanitize.*],[add_extra_warning([error=unknown-sanitizers])])dnl dnl dnl Clang versions which don't support statement attributes (for fallthrough) dnl complain about them in this way: dnl warning: declaration does not declare anything [-Wmissing-declarations] m4_if([$1],[statement],[add_extra_warning([error=missing-declarations])])dnl dnl ACM_PUSH_VAR([$0],[ACM_LANG_PREFIX[]FLAGS])dnl ACM_ADD_COMPILER_WARNING_QUIETLY([extra_warnings])dnl m4_popdef([extra_warnings],[add_extra_warning])dnl dnl AS_VAR_PUSHDEF will mangle ATTRIBUTE with AS_TR_SH to make it shell-safe. dnl We do a similar transform to the uppercased ATTR_ID for the CPP macro dnl name, first stripping backslash, single, and double quotes, then replacing dnl any other remaining non-alphanumeric characters with underscores. dnl dnl We don't use AS_TR_CPP for this as it has a very conservative opinion on dnl what a 'literal' is in this context, and many valid expected characters dnl in an attribute will cause it to select a runtime shell transform instead dnl of an m4 literal expansion, but ATTRIBUTE should always be a literal here. 
m4_pushdef([ATTR_ID],[m4_toupper(ACM_TR_SH_LITERAL($2))])dnl AS_VAR_PUSHDEF([cachevar],[mu_cv_[]ACM_LANG_ABBREV[]_$1_attr_$2])dnl AC_CACHE_CHECK([if ACM_LANG_COMPILER supports $1 attribute (($2))],[cachevar], [AC_COMPILE_IFELSE([$3], [AS_VAR_SET([cachevar],[yes])], [AS_VAR_SET([cachevar],[no])]) ]) m4_ifval([$4],[$4($@)])dnl ACM_POP_VAR([$0],[ACM_LANG_PREFIX[]FLAGS])dnl AS_VAR_POPDEF([cachevar])dnl m4_popdef([ATTR_ID])dnl ]) # _acm_attribute_types, _ACM_ATTRIBUTE_TYPES # # The list of expected attribute types that we specialise macros for. # for each attribute type in this list we define: # # ACM_CHECK_[TYPE]_ATTRIBUTE # ACM_HAVE_[TYPE]_ATTRIBUTE # ACM_DEFINE_[TYPE]_ATTRIBUTE # # Any additions to this list should also define a suitable default test for # that type of attribute in an _acm_check_[type]_attr_prologue macro below # (and/or _acm_check_[type]_attr_body as needed). # ---------------------------------------------------------------------------- m4_define([_acm_attribute_types],[function,type,variable,label,enum,statement]) m4_define([_ACM_ATTRIBUTE_TYPES],m4_toupper(m4_dquote(_acm_attribute_types))) # _acm_check_[type]_attr_prologue([ATTRIBUTE]) # # ACM_CHECK_ATTRIBUTE TEST-PROGRAM prologue snippets for the default test used # by each attribute TYPE. It may not be suitable for all possible attributes # of that TYPE, but it should Just Work for as many of the most commonly used # ones as possible. # # These will be used if the TEST-PROLOGUE argument is not explicitly specified # when the ACM_CHECK_[TYPE]_ATTRIBUTE macros are expanded. If no prologue # snippet is defined for the TYPE in question, the prologue (before main() in # the test code) will be empty by default. # ---------------------------------------------------------------------------- dnl ----- Test for function attributes --------------------------------------- m4_define([_acm_check_function_attr_prologue], [dnl dnl We need the void parameter declaration for C, or the compiler will not consider dnl this a function prototype, which can be significant for testing some attributes dnl including those like 'format' which needs to check the function arguments. AC_LANG_CASE([C],[[int f(void) __attribute__(($1));]], [[int f() __attribute__(($1));]])dnl ]) dnl ----- Test for type attributes ------------------------------------------- m4_define([_acm_check_type_attr_prologue], [[struct S { int i; } __attribute__(($1));]]) dnl ----- Test for variable attributes --------------------------------------- m4_define([_acm_check_variable_attr_prologue], [[char c[4] __attribute__(($1));]]) dnl ----- Test for label attributes ------------------------------------------ m4_define([_acm_check_label_attr_prologue], [dnl void f(int i) { Again: __attribute__(($1)); while (i++ < 10) goto Again; }dnl ]) dnl ----- Test for enumerator attributes ------------------------------------- m4_define([_acm_check_enum_attr_prologue], [dnl enum E { A __attribute__(($1)), B __attribute__(($1)) };dnl ]) dnl ----- Test for null statement attributes --------------------------------- m4_define([_acm_check_statement_attr_prologue], [dnl int f(int i); int f(int i) { switch(i) { case 1: __attribute__(($1)); case 2: return i; } return 0; }dnl ]) # _acm_check_[type]_attr_body([ATTRIBUTE]) # # ACM_CHECK_ATTRIBUTE TEST-PROGRAM body snippets for the default test used by # each attribute TYPE. It may not be suitable for all possible attributes of # that TYPE, but it should Just Work for as many of the most commonly used # ones as possible. 
# # These will be used if the TEST-BODY argument is not explicitly specified # when the ACM_CHECK_[TYPE]_ATTRIBUTE macros are expanded. If no body snippet # is defined for the TYPE in question, the body (inside main() in the test # code) will be empty by default. # ---------------------------------------------------------------------------- dnl In practice we don't need any body code for the default type tests (yet). dnl A test program 'body' for 'statement' attributes can be defined like this: dnl m4_define([_acm_check_statement_attr_body],[[f(10);]]) # ACM_CHECK_[TYPE]_ATTRIBUTE([ATTRIBUTE],[TEST-PROLOGUE],[TEST-BODY],[ACTION],[ACTION-OPTIONS...]) # # Specialisation of ACM_CHECK_ATTRIBUTE for each attribute TYPE that attrtype # loops over here. Imports the (overridable) default test prologue and body # for each type if an _acm_check_[type]_attr_{prologue,body} macro is defined. # # Defines the following specialised convenience macros: # # ACM_CHECK_FUNCTION_ATTRIBUTE # ACM_CHECK_TYPE_ATTRIBUTE # ACM_CHECK_VARIABLE_ATTRIBUTE # ACM_CHECK_LABEL_ATTRIBUTE # ACM_CHECK_ENUM_ATTRIBUTE # ACM_CHECK_STATEMENT_ATTRIBUTE # # Like ACM_CHECK_ATTRIBUTE, user code generally won't need to use these macros # directly and will use the more specialised ACM_HAVE_* and ACM_DEFINE_* which # provide an ACTION to define CPP macros in the configuration header. # ---------------------------------------------------------------------------- m4_foreach([attrtype],[_acm_attribute_types],[dnl AC_DEFUN([ACM_CHECK_]m4_toupper(attrtype)[_ATTRIBUTE], [dnl m4_pushdef([attrtest],[_acm_check_]]attrtype[[_attr])dnl m4_pushdef([prologue],[m4_ifdef(attrtest[_prologue],attrtest[_prologue($1)])])dnl m4_pushdef([body], [m4_ifdef(attrtest[_body], attrtest[_body($1)])])dnl ACM_CHECK_ATTRIBUTE(]attrtype[,[$1], [AC_LANG_PROGRAM([m4_default([$2],[prologue])], [m4_default([$3],[body])])dnl ],[$4],m4_shift(m4_shift3($@)))dnl m4_popdef([body],[prologue],[attrtest])dnl ]) ]) # ACM_CHECK_FORMAT_ATTRIBUTE([FORMAT-STYLE],[ARG1],[ACTION],[ACTION-OPTIONS...]) # # Specialisation of ACM_CHECK_ATTRIBUTE for function attributes that annotate # format strings which can and should be sanity checked. The FORMAT-STYLE is # one of what GCC calls ARCHETYPES - printf, scanf, strftime, strfmon - which # determine how the format string itself is interpreted. # # The ARG1 parameter is the value to use for the 3rd (FIRST-TO-CHECK) argument # of the format() attribute. It indicates which argument in the test function # contains the first value to be formatted (which for the test that is defined # by this macro whould be 2), but must be 0 in the case of strftime, or if the # function takes a va_list instead of the explicit arguments (like vprintf). # If ARG1 is set to 0, then we use an open coded 0 in the attribute definition # instead of the function-like macro parameter name arg1. # # The ACTION macro, and ACTION-OPTIONS are passed verbatim to the expansion of # ACM_CHECK_ATTRIBUTE. # # This macro assumes that the __gnu_* format conventions are being used when # compiling for windows instead of the __ms_* ones - as sane code that builds # with mingw these days will use __USE_MINGW_ANSI_STDIO which is enabled when # _GNU_SOURCE is defined and provides an implementation which supports the # full set of POSIX format characters, not just the sub-standard horror show # from msvcrt.dll ... 
We could make that configurable via an extra parameter # here, or maybe some --enable flag to choose the implementation, but worry # about that if the GNU compatible version is ever not the only sane choice. # There will surely be other insanity to deal with if that ever is the case. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_CHECK_FORMAT_ATTRIBUTE], [dnl m4_pushdef([prologue], [void p(const char*,...) __attribute__((format (__]$[]1[__, 1, m4_default([$2],[2]))));])dnl AS_CASE([$host], [*-*-cygwin* | *-*-mingw32*],[dnl ACM_CHECK_ATTRIBUTE([function],[format (__gnu_$1__,fmt,]m4_if([$2],[0],[0],[arg1])[)], [AC_LANG_PROGRAM([prologue([gnu_$1])],[[]])], [$3],m4_shift3($@))dnl ],[dnl ACM_CHECK_ATTRIBUTE([function],[format (__$1__,fmt,]m4_if([$2],[0],[0],[arg1])[)], [AC_LANG_PROGRAM([prologue([$1])],[[]])], [$3],m4_shift3($@))dnl ])dnl m4_popdef([prologue])dnl ]) # _ACM_HAVE_ATTRIBUTE([TYPE],[ATTRIBUTE],[TEST-PROGRAM],[this macro],[CPP-MACRO]) # # ACM_CHECK_[TYPE]_ATTRIBUTE ACTION macro used by ACM_HAVE_[TYPE]_ATTRIBUTE. # If the CPP-MACRO argument is passed, that macro will be AC_DEFINE'd with # the numeric value 1 if ATTRIBUTE is supported as used by TEST-PROGRAM. # # If CPP-MACRO is not explicitly named, a HAVE_ macro name will be constructed # using the AC_LANG_PREFIX for the current test language, the attribute TYPE, # and the sanitised-for-CPP ATTRIBUTE string (ATTR_ID): # # HAVE_[ACM_LANG_PREFIX]_[ATTR_TYPE]_ATTRIBUTE_[ATTR_ID] # # That macro can be used as a conditional guard by user code which includes # the autoheader generated configuration header. # ---------------------------------------------------------------------------- AC_DEFUN([_ACM_HAVE_ATTRIBUTE], [dnl m4_pushdef([ATTR_TYPE],[m4_toupper([$1])])dnl AS_VAR_IF([cachevar],[yes], [AC_DEFINE(m4_default([$5],[HAVE_[]ACM_LANG_PREFIX[]_[]ATTR_TYPE[]_ATTRIBUTE_[]ATTR_ID]), [1], [Have support for $1 attribute '$2'])]) m4_popdef([ATTR_TYPE])dnl ]) # _ACM_DEFINE_ATTRIBUTE([TYPE],[ATTRIBUTE],[TEST-PROGRAM],[this macro],[CPP-MACRO],[ELSE-DEF]) # # ACM_CHECK_[TYPE]_ATTRIBUTE ACTION macro used by ACM_DEFINE_[TYPE]_ATTRIBUTE. # If the CPP-MACRO argument is passed, that macro will be AC_DEFINE_UNQUOTED # using the ATTRIBUTE literal if it is supported as used by TEST-PROGRAM. # # If CPP-MACRO is not explicitly named, a macro name will be constructed using # the AC_LANG_PREFIX for the current test language, the attribute TYPE, and the # sanitised-for-CPP ATTRIBUTE string (ATTR_ID): # # EM_[ACM_LANG_PREFIX]_[ATTR_TYPE]_ATTRIBUTE_[ATTR_ID] # # That macro can be used in user code which includes the autoheader generated # configuration header in place of the __attribute((ATTRIBUTE)) literal string # anywhere the attribute itself would be used. # # If the attribute itself takes 'variable' arguments, then a function-like # CPP-MACRO can be defined with the same argument names used in the ATTRIBUTE # definition, but the TEST-PROGRAM must use the attribute with representative # real values used in place of the function-like macro argument names. 
# For example: # # ACM_DEFINE_VARIABLE_ATTRIBUTE([aligned(n)], # [EM_ALIGNED(n)], # [[char c[4] __attribute__((aligned(4)));]]) # # ---------------------------------------------------------------------------- AC_DEFUN([_ACM_DEFINE_ATTRIBUTE], [dnl m4_pushdef([ATTR_TYPE],[m4_toupper([$1])])dnl AS_VAR_IF([cachevar],[yes], [acm_attr_defn="AS_ESCAPE([__attribute__(($2))])"], [acm_attr_defn="AS_ESCAPE([$6])"])dnl AC_DEFINE_UNQUOTED(m4_default([$5],[EM_[]ACM_LANG_PREFIX[]_[]ATTR_TYPE[]_ATTR_[]ATTR_ID]), [$acm_attr_defn], [Macro for $1 attribute '$2'])dnl m4_popdef([ATTR_TYPE])dnl ]) # ACM_HAVE_[TYPE]_ATTRIBUTE([ATTRIBUTE],[CPP-MACRO],[TEST-PROLOGUE],[TEST-BODY]) # ACM_DEFINE_[TYPE]_ATTRIBUTE([ATTRIBUTE],[CPP-MACRO],[TEST-PROLOGUE],[TEST-BODY],[ELSE-DEF]) # # Defines the following specialised convenience macros: # # ACM_HAVE_FUNCTION_ATTRIBUTE, ACM_DEFINE_FUNCTION_ATTRIBUTE # ACM_HAVE_TYPE_ATTRIBUTE, ACM_DEFINE_TYPE_ATTRIBUTE # ACM_HAVE_VARIABLE_ATTRIBUTE, ACM_DEFINE_VARIABLE_ATTRIBUTE # ACM_HAVE_LABEL_ATTRIBUTE, ACM_DEFINE_LABEL_ATTRIBUTE # ACM_HAVE_ENUM_ATTRIBUTE, ACM_DEFINE_ENUM_ATTRIBUTE # ACM_HAVE_STATEMENT_ATTRIBUTE, ACM_DEFINE_STATEMENT_ATTRIBUTE # # The ACM_HAVE_* macros define a HAVE_* guard macro which can be used to wrap # code that is conditional on the ATTRIBUTE being supported. # # The ACM_DEFINE_* macros define a (possibly function-like) symbol macro which # may be used in place of the __attribute__((ATTRIBUTE)) literal when simply # omitting it if it is not supported is an acceptable choice. # # If the CPP-MACRO name is not explicitly specified, then a constructed name # will be used based on the current AC_LANG, the attribute TYPE, and the # ATTRIBUTE name itself. # # If TEST-PROLOGUE or TEST-BODY are not explicitly specified, then the default # test for the given attribute TYPE will be used. # # If ELSE-DEF is provided, then the CPP-MACRO will be defined with that value # instead of an empty definition if ATTRIBUTE is not supported. # # For example: # ACM_HAVE_FUNCTION_ATTRIBUTE([no_sanitize("unsigned-integer-overflow")]) # ACM_DEFINE_FUNCTION_ATTRIBUTE([unused]) # ACM_DEFINE_TYPE_ATTRIBUTE([packed],[EM_PACKED]) # ACM_DEFINE_STATEMENT_ATTRIBUTE([fallthrough],[EM_FALLTHROUGH],[],[], # [do {} while(0)]) # ---------------------------------------------------------------------------- m4_foreach([act],[HAVE,DEFINE], [m4_foreach([type],[_ACM_ATTRIBUTE_TYPES], [AC_DEFUN([ACM_]act[_]type[_ATTRIBUTE], [ACM_CHECK_]type[_ATTRIBUTE([$1],[$3],[$4],[_ACM_]]act[[_ATTRIBUTE],[$2],[$5])]) ]) ]) # _ACM_DEFINE_FORMAT_ATTRIBUTE([FORMAT-STYLE],[ARG1],[CPP-MACRO]) # # ACM_CHECK_FORMAT_ATTRIBUTE ACTION macro used to implement the specialised # ACM_DEFINE_[STYLE]_FORMAT_ATTRIBUTE macros. # # The ARG1 value is used as the FIRST-TO-CHECK parameter in the TEST-PROLOGUE. # If ARG1 is set to 0, then we use an open coded 0 in the attribute definition # instead of the function-like mcro parameter name arg1, and we only accept a # single fmt parameter in the macro # # If that test succeeds, a function-like CPP macro is defined of form: # # EM_[STYLE]_FORMAT( fmt, arg1 ) __attribute__((format (__style__, fmt, arg1))) # # with an empty definition if the test failed to indicate support. # # If CPP-MACRO is explicitly specified, then that name will be used for the # function-like CPP macro instead of EM_[STYLE]_FORMAT. 
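#
# As an illustration of how the generated macro might then be used by code
# which includes the configuration header (a sketch only; the function shown
# and its format/argument positions are assumptions, not part of this macro):
#
#    void log_msg( const char *fmt, ... )  EM_PRINTF_FORMAT( 1, 2 );
#
# which expands to __attribute__((format (__printf__, 1, 2))) where that
# attribute is supported (or to the __gnu_printf__ variant on cygwin and
# mingw32 hosts as described above), and to nothing where it is not.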
# ---------------------------------------------------------------------------- AC_DEFUN([_ACM_DEFINE_FORMAT_ATTRIBUTE], [dnl m4_pushdef([macro_name],m4_default([$3],[EM_]m4_toupper([$1])[_FORMAT]))dnl ACM_CHECK_FORMAT_ATTRIBUTE([$1],[$2],[_ACM_DEFINE_ATTRIBUTE], [macro_name[]( fmt[]m4_if([$2],[0],[],[, arg1]) )])dnl m4_popdef([macro_name])dnl ]) # ACM_DEFINE_[FORMAT-STYLE]_FORMAT_ATTRIBUTE([CPP-MACRO]) # # Defines the following specialised convenience macros: # # ACM_DEFINE_PRINTF_FORMAT_ATTRIBUTE # ACM_DEFINE_SCANF_FORMAT_ATTRIBUTE # ACM_DEFINE_STRFTIME_FORMAT_ATTRIBUTE # ACM_DEFINE_STRFMON_FORMAT_ATTRIBUTE # # Which test support for the respective format attributes, and if it is # available define function-like macros to enable their use, for example: # # EM_PRINTF_FORMAT( fmt, arg1 ) __attribute__((format (__printf__,fmt,arg1))) # EM_SCANF_FORMAT( fmt, arg1 ) __attribute__((format (__scanf__,fmt,arg1))) # EM_STRFTIME_FORMAT( fmt ) __attribute__((format (__strftime__,fmt,0))) # EM_STRFMON_FORMAT( fmt ) __attribute__((format (__strfmon__,fmt,arg1))) # # If CPP-MACRO is explicitly specified, then that name will be used for the # function-like CPP macro instead of EM_[STYLE]_FORMAT. # ---------------------------------------------------------------------------- m4_foreach([fmt],[[printf,2],[scanf,2],[strfmon,2],[strftime,0]], [AC_DEFUN([ACM_DEFINE_]m4_toupper(m4_car(fmt))[_FORMAT_ATTRIBUTE], [_ACM_DEFINE_FORMAT_ATTRIBUTE(]m4_dquote(fmt)[,[$1])]) ]) # ACM_ADD_MISSING_DEP([NAME]) # # Add NAME to the list of missing dependencies which a later call to # ACM_CHECKPOINT_MISSING_DEPS should check and report on. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_ADD_MISSING_DEP], [dnl ACM_ADD_OPT([mu_missing_deps],[$1],[, ])dnl ]) # _ACM_CHECKPOINT_MISSING_DEPS([CHECK-VAR]) # # Implementation of ACM_CHECKPOINT_MISSING_DEPS # ---------------------------------------------------------------------------- AC_DEFUN([_ACM_CHECKPOINT_MISSING_DEPS], [dnl AS_IF([test -n "$$1"], [AC_MSG_NOTICE([]) AC_MSG_NOTICE([Some required dependencies were not found.]) AC_MSG_NOTICE([Please install: $$1]) AC_MSG_NOTICE([]) AC_MSG_ERROR([Cannot continue until this is resolved.]) ] ) ]) # ACM_CHECKPOINT_MISSING_DEPS([CHECK-VAR]) # # Test if CHECK-VAR is empty, and if it is not then error out, reporting what # it contains. It is expected to contain cached information about the missing # dependencies which didn't cause configure to abort until as complete a list # as possible of all the missing dependencies could be obtained. # # This avoids the annoying whack-a-mole routine of installing something to fix # a missing dependency, only to then have it fail again with yet another one. # If CHECK-VAR is not explicitly specified, then mu_missing_deps will be used, # which is where ACM_ADD_MISSING_DEP will accumulate them. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_CHECKPOINT_MISSING_DEPS], [dnl _$0(m4_default([$1],[mu_missing_deps])) ]) # ACM_LONG_OPTIONS([OPTIONS]) # # Expands a comma-separated list of OPTIONS to a space-separated one with each # option appended to a '--' for use as long option arguments to some command. # This macro is just syntactic sugar to keep the point of use easily readable. 
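#
# For example, ACM_LONG_OPTIONS([cflags,libs]) expands to '--cflags --libs '
# (note the trailing space), which is why the expansion can be placed hard up
# against the following shell word at its points of use in the pkg-config
# macros below.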
# ---------------------------------------------------------------------------- AC_DEFUN([ACM_LONG_OPTIONS],[m4_foreach([opt],[$1],[--opt ])]) # ACM_PKG_CONFIG([MIN-VERSION]) # # This is a safe and portable wrapper to check if pkg-config is available, and # optionally, that it is no older than MIN-VERSION. It avoids the problem with # the normal circular dependency where the macro to check for pkg-config is in # the same package as pkg-config is, potentially making everything fall apart # horribly and confusingly if it is not. # # If the pkg.m4 test macro is available, then we'll use it to set PKG_CONFIG, # otherwise we'll just note that it's not and leave that unset. By design it # is not an error for pkg-config to not be available, nor is it a hard error # for the version constraint to fail even if some version is available. Most # things can have safe and sane fallbacks if pkg-config can't be queried, so # it is up to the caller to fail out if they do have some hard requirement on # it that can't be satisfied in any other way. But usually that's not really # what most things should do, because most .pc files have almost zero entropy. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_PKG_CONFIG], [dnl m4_ifdef([PKG_PROG_PKG_CONFIG], [PKG_PROG_PKG_CONFIG([$1])], [AC_MSG_NOTICE([pkg-config pkg.m4 is not installed])]) ]) # ACM_PKG_CONFIG_GET([LOCAL-VARIABLE],[MODULE],[OPTIONS],[DEFAULT],[FILTER]) # # If LOCAL-VARIABLE is not already set, either from the environment, or on the # command-line of configure, or by some prior action of the configure script, # then try to obtain a value for it from the pkg-config .pc for MODULE, with a # query defined by the comma-separated list of pkg-config OPTIONS. # # If pkg-config is not available, or if the query fails, then set it to the # given DEFAULT value. # # If the optional FILTER parameter is passed, then if the result is obtained # from pkg-config, that macro will also be expanded with LOCAL-VARIABLE passed # as a parameter after its value has been assigned to what pkg-config returned. # This allows it to be modified if needed, before the result is reported and # subsequently used. If the value is obtained from the environment or the # DEFAULT is used, then the FILTER parameter is ignored. # # For example: # ACM_PKG_CONFIG_GET([FOO_FLAGS],[foo],[cflags,libs],[-lfoo]) # # Will try to set FOO_FLAGS using `pkg-config --cflags --libs foo` if possible # else fall back to a default of using '-lfoo'. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_PKG_CONFIG_GET], [dnl AC_REQUIRE([ACM_PKG_CONFIG]) AC_MSG_CHECKING([for $1]) AS_IF([test -n "$$1"], [AC_MSG_RESULT(['$$1' (from environment)])], [test -z "$PKG_CONFIG"], [AC_MSG_RESULT(['$4' (default value)]) $1="$4" ], [$1=$($PKG_CONFIG ACM_LONG_OPTIONS([$3])"$2" 2>/dev/null)], [m4_ifval([$5],[$5([$1])])dnl AC_MSG_RESULT(['$$1' (from $2.pc $($PKG_CONFIG --modversion $2 2>/dev/null))])], [AC_MSG_RESULT(['$4' (default value)]) AC_MSG_WARN(['pkg-config ACM_LONG_OPTIONS([$3])$2' failed]) $1="$4" ]dnl )dnl ]) # ACM_PKG_CONFIG_GET_VAR([LOCAL-VARIABLE],[MODULE],[CONFIG-VARIABLE],[DEFAULT]) # # If LOCAL-VARIABLE is not already set, either from the environment, or on the # command-line of configure, or by some prior action of the configure script, # then try to obtain a value for it from the variable CONFIG-VARIABLE set in # the pkg-config .pc for MODULE. 
# # If pkg-config is not available, or if the query fails, then set it to the # given DEFAULT value. # # For example: # ACM_PKG_CONFIG_GET_VAR([FOO_DIR],[foo],[foodir],[/tmp/foo]) # # Will try to set FOO_DIR using `pkg-config --variable=foodir foo` if possible # else fall back to a default of using '/tmp/foo'. # # This is syntactic sugar equivalent to: # ACM_PKG_CONFIG_GET([FOO_DIR],[foo],[variable="foodir"],[/tmp/foo]) # ---------------------------------------------------------------------------- AC_DEFUN([ACM_PKG_CONFIG_GET_VAR], [dnl ACM_PKG_CONFIG_GET([$1],[$2],[variable="$3"],[$4]) ]) # ACM_PC_LIBS([LOCAL-VARIABLE]) # # This is a filter macro used by ACM_PKG_CONFIG_GET_LIBS to strip the leading # -l from the list of libraries returned by pkg-config to put them into the # form expected by makeup *_LIBS variables where only the library name is used. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_PC_LIBS], [dnl $1=$(echo "$$1" | [sed 's/^-l//; s/[[:space:]]-l/ /g']) ]) # ACM_PKG_CONFIG_GET_LIBS([LOCAL-VARIABLE],[MODULE],[DEFAULT],[STATIC-EXTRA]) # # This is a specialisation of ACM_PKG_CONFIG_GET for fetching the --libs-only-l # list of libraries to link with, but with the -l prefixes removed to give them # to us in the form that makup expects for *_LIBS. # # If LOCAL-VARIABLE is not already set, either from the environment, or on the # command-line of configure, or by some prior action of the configure script, # then try to obtain a value for it from the pkg-config .pc for MODULE. If the # mu_cv_enable_shared cache variable is set to 'no' then the query will include # the --static option to pkg-config to also obtain the Libs.private needed for # static linking. # # If pkg-config is not available, or if the query fails, then set it to the # given DEFAULT value. The STATIC-EXTRA value will be appended to that in the # case where Libs.private would have been included as described above. # If a DEFAULT value is not passed then the value of MODULE will be used. # # For example: # ACM_PKG_CONFIG_GET_LIBS([FOO_LIBS],[libfoo],[foo],[bar z m]) # # Will try to set FOO_LIBS to the list of libraries that are returned by # `pkg-config --libs-only-l libfoo` if possible, else fall back to a default # of using 'foo'. Or in the case where static linking is configured, it will # try `pkg-config --static --libs-only-l libfoo` falling back to 'foo bar z m' # so that libbar, libz, and libm are also included when static linking. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_PKG_CONFIG_GET_LIBS], [dnl AS_IF([test "$mu_cv_enable_shared" = no], [ACM_PKG_CONFIG_GET([$1],[$2],[static,libs-only-l],m4_join([ ],m4_default([$3],[$2]),[$4]),[ACM_PC_LIBS])], [ACM_PKG_CONFIG_GET([$1],[$2],[libs-only-l],m4_default([$3],[$2]),[ACM_PC_LIBS])]) ]) # ACM_PC_LDFLAGS([LOCAL-VARIABLE]) # # This is a filter macro used by ACM_PKG_CONFIG_GET_LDFLAGS to strip all words # beginning with '-l' from the string returned by pkg-config leaving only the # options which should be passed as *_LDFLAGS without any of the libraries. 
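#
# For example, with purely illustrative values, if pkg-config returned
#
#    -L/opt/foo/lib -Wl,--as-needed -lfoo -lbar
#
# then after this filter the variable would be left holding just
#
#    -L/opt/foo/lib -Wl,--as-needed
#
# with the library words stripped out, to be obtained separately with
# ACM_PKG_CONFIG_GET_LIBS if they are needed.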
# ---------------------------------------------------------------------------- AC_DEFUN([ACM_PC_LDFLAGS], [dnl $1=$(echo "$$1" | [sed 's/^\(-l[^[:space:]]\+[[:space:]]*\)\+//; s/[[:space:]]\+-l[^[:space:]]\+//g']) ]) # ACM_PKG_CONFIG_GET_LDFLAGS([LOCAL-VARIABLE],[MODULE],[DEFAULT],[STATIC-EXTRA]) # # This is a specialisation of ACM_PKG_CONFIG_GET to work around the fact that # --libs-only-L does not return all of the LDFLAGS specified for linking, only # the linker search paths, and there is no way to get just those aside from # obtaining the entire --libs output and filtering out the libraries from it. # # If LOCAL-VARIABLE is not already set, either from the environment, or on the # command-line of configure, or by some prior action of the configure script, # then try to obtain a value for it from the pkg-config .pc for MODULE. If the # mu_cv_enable_shared cache variable is set to 'no' then the query will include # the --static option to pkg-config to also obtain the Libs.private needed for # static linking. # # If pkg-config is not available, or if the query fails, then set it to the # given DEFAULT value. The STATIC-EXTRA value will be appended to that in the # case where Libs.private would have been included as described above. # # For example: # ACM_PKG_CONFIG_GET_LIBS([FOO_LDFLAGS],[foo],[-rdynamic],[-pthread]) # # Will try to set FOO_LDFLAGS to the non-library options that are returned by # `pkg-config --libs foo` if possible, else fall back to a default of using # '-rdynamic'. Or for the case where static linking is configured, it will # try `pkg-config --static --libs foo`, falling back to '-rdynamic -pthread' # as the *_LDFLAGS needed for static linking. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_PKG_CONFIG_GET_LDFLAGS], [dnl AS_IF([test "$mu_cv_enable_shared" = no], [ACM_PKG_CONFIG_GET([$1],[$2],[static,libs],m4_join([ ],[$3],[$4]),[ACM_PC_LDFLAGS])], [ACM_PKG_CONFIG_GET([$1],[$2],[libs],[$3],[ACM_PC_LDFLAGS])]) ]) bit-babbler-0.9/Makeup/ac-fragments/pthread.m40000644000000000000000000001733114125243667016116 0ustar dnl Makeup aclocal macros for pthread support. dnl dnl Copyright 2015 - 2021, Ron dnl dnl These macros are distributed under the terms of the GNU GPL version 2. dnl dnl As a special exception to the GPL, it may be distributed without dnl modification as a part of a program using a makeup generated build dnl system, under the same distribution terms as the program itself. # ACM_FUNC_PTHREAD_SETNAME # # Check for pthread_setname_np. We can't just use AC_CHECK_FUNCS for this one # because it has different signatures on different platforms. Right now we # mostly care about distinguishing the GNU version which takes two parameters # and the MacOS one which only takes a name and must be called from the thread # where the name is to be set. 
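#
# A sketch of how the resulting HAVE_* macros might then be used, from code
# running in the thread which is to be named (illustrative only; the thread
# name is an arbitrary assumption, and on the BSDs the declaration comes from
# pthread_np.h as noted below):
#
#   #if defined(HAVE_PTHREAD_SETNAME_NP_GNU)
#    pthread_setname_np( pthread_self(), "worker" );
#   #elif defined(HAVE_PTHREAD_SETNAME_NP_MAC)
#    pthread_setname_np( "worker" );
#   #elif defined(HAVE_PTHREAD_SET_NAME_NP)
#    pthread_set_name_np( pthread_self(), "worker" );
#   #endif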
# ---------------------------------------------------------------------------- AC_DEFUN([ACM_FUNC_PTHREAD_SETNAME], [ ACM_PUSH_VAL([$0],[CPPFLAGS],[$PTHREAD_CPPFLAGS])dnl ACM_PUSH_VAL([$0],[LDFLAGS],[$PTHREAD_LDFLAGS])dnl AC_CACHE_CHECK([for GNU pthread_setname_np],[mu_cv_func_gnu_pthread_setname_np], [AC_LINK_IFELSE([AC_LANG_PROGRAM([[ #include ]], [[ pthread_setname_np(pthread_self(),"x"); ]] )], [mu_cv_func_gnu_pthread_setname_np=yes], [mu_cv_func_gnu_pthread_setname_np=no] ) ]) AS_IF([test "$mu_cv_func_gnu_pthread_setname_np" = yes], [AC_DEFINE([HAVE_PTHREAD_SETNAME_NP_GNU],[1], [Have GNU style pthread_setname_np(pthread_t thread, const char *name)])], [AC_CACHE_CHECK([for MacOS pthread_setname_np],[mu_cv_func_mac_pthread_setname_np], [AC_LINK_IFELSE([AC_LANG_PROGRAM([[ #include ]], [[ pthread_setname_np("x"); ]] )], [mu_cv_func_mac_pthread_setname_np=yes], [mu_cv_func_mac_pthread_setname_np=no] ) ]) ]) AS_IF([test "$mu_cv_func_mac_pthread_setname_np" = yes], [AC_DEFINE([HAVE_PTHREAD_SETNAME_NP_MAC],[1], [Have MacOS style pthread_setname_np(const char *name)]) ]) dnl OpenBSD and FreeBSD have pthread_set_name_np declared in pthread_np.h AS_IF([test "$mu_cv_func_gnu_pthread_setname_np" != yes && test "$mu_cv_func_mac_pthread_setname_np" != yes], [AC_CHECK_FUNCS([pthread_set_name_np]) ]) ACM_POP_VAR([$0],[LDFLAGS,CPPFLAGS])dnl ]) # ACM_CXX_FORCED_UNWIND # # Check if abi::__forced_unwind is supported. Code built with GCC will unwind # the stack by throwing a special exception of this type whenever a thread is # cancelled. Which was a great idiom until C++11 stuffed it all up by refusing # to allow exceptions to pass through destructors by default - which means now # it might just abruptly terminate the entire process instead ... But aside # from that, it's not supported by clang on every platform, and the ancient GCC # 4.2.1 20070719 on OpenBSD 6.1 doesn't support it either. So things which do # need to support those will need to explicitly check for it. # # The impact of it not being present can be minimised by defining the missing # type if needed with code like the example below. But each use of it would # still need to be checked to see if any other special handling is required in # the case where rather than unwinding cleanly, some thread simple ceases to # exist when it is cancelled. # # // If the thread cancellation exceptions are not supported, provide our own # // definition of the exception type. The main trick here is that we can't # // put it directly into namespace abi if that is really an alias to some # // other internal namespace name. If it's an alias to some other name than # // __cxxabiv1, then this will explode at build time and we'll need to add # // a new configure test for other possible aliases, but right now that is # // what is used on all platforms with the abi namespace so far. # #if !HAVE_ABI_FORCED_UNWIND # # #if HAVE_ABI_ALIAS_TO_CXXABIV1 # namespace __cxxabiv1 # #else # namespace abi # #endif # { # struct __forced_unwind {}; # } # # #endif # # The future usability of this unwinding idiom is currently something of an # open question while the GCC maintainers kick the can down the road, having # made their own extension inherently dangerous by switching the default C++ # standard to gnu++14 where destructors are implicitly noexcept, and not # showing a lot of enthusiasm for addressing the question of how to deal with # that now. 
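#
# For reference, the idiom which all of the above exists to support looks
# roughly like this (a hypothetical worker thread body, not code from this
# package; do_work() and report_error() are placeholder names), with the
# crucial detail being that the cancellation exception must be rethrown:
#
#   try {
#       do_work();
#   }
#   catch( abi::__forced_unwind& ) {
#       // Thread cancellation in progress, let the stack unwinding continue.
#       throw;
#   }
#   catch( const std::exception &e ) {
#       report_error( e.what() );
#   }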
# ---------------------------------------------------------------------------- AC_DEFUN([ACM_CXX_FORCED_UNWIND], [ dnl The C++ stack unwinding exception for thread cancellation was a GNU extension dnl and though it is supported on some platforms by clang too, it isn't supported dnl on every platform that is using gcc either. So first, test if we have it. AC_CACHE_CHECK([for abi::__forced_unwind],[mu_cv_type_forced_unwind], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #include ]], [[ void f(const abi::__forced_unwind&); ]] )], [mu_cv_type_forced_unwind=yes], [mu_cv_type_forced_unwind=no] ) ]) dnl If we don't, test if we at least have the "abi" namespace defined. AS_IF([test "$mu_cv_type_forced_unwind" = yes], [AC_DEFINE([HAVE_ABI_FORCED_UNWIND],[1], [Have abi::__forced_unwind support])], [AC_CACHE_CHECK([for namespace abi],[mu_cv_namespace_abi], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #include ]], [[ using namespace abi; ]] )], [mu_cv_namespace_abi=yes], [mu_cv_namespace_abi=no] ) ]) ]) dnl If the "abi" namespace is defined, we still might not be able to place our dnl own replacement type into it, since that is an illegal construct if "abi" dnl is an alias to some other namespace. Right now, on all platforms we know dnl of, it is an alias to __cxxabiv1, so test for that, and use it if so. dnl If it's not, then either "abi" is a real namespace and we can just use it dnl directly (which is what we attempt if this test fails), or it's aliased to dnl some other internal namespace name - in which case the build should fail dnl and we'll need to add a new test for additional names if/when someone ever dnl hits that. AS_IF([test "$mu_cv_namespace_abi" = yes], [AC_CACHE_CHECK([for namespace abi alias to __cxxabiv1],[mu_cv_namespace_alias_abi], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #include namespace __cxxabiv1 { struct xx {}; } ]], [[ abi::xx x; ]] )], [mu_cv_namespace_alias_abi=yes], [mu_cv_namespace_alias_abi=no] ) ]) ]) AS_IF([test "$mu_cv_namespace_alias_abi" = yes], [AC_DEFINE([HAVE_ABI_ALIAS_TO_CXXABIV1],[1], [Have namespace abi alias to __cxxabiv1]) ]) ]) bit-babbler-0.9/Makeup/ac-fragments/tr1.m40000644000000000000000000000777314125243667015206 0ustar dnl Makeup aclocal macros for C++03 TR1 compatibility support. dnl dnl Copyright 2017 - 2018, Ron dnl dnl These macros are distributed under the terms of the GNU GPL version 2. dnl dnl As a special exception to the GPL, it may be distributed without dnl modification as a part of a program using a makeup generated build dnl system, under the same distribution terms as the program itself. # ACM_TR1_TYPE_TRAITS # # Check what support we have for type_traits. Prior to C++11 they were part # of the tr1 extensions. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_TR1_TYPE_TRAITS], [ AC_CHECK_HEADERS([type_traits],[], [AC_CHECK_HEADERS([tr1/type_traits])]) ]) # ACM_TR1_UNORDERED_MAP # # Check what support we have for a hashed map type. We should almost never # need to fall back as far as the __gnu_cxx::hash_map anymore, but we are # still at that awkward stage in the transition between the tr1 implementation # and the standardised in C++11 one. For bonus fun, in some cases, like clang # in FreeBSD 10.3, the tr1 header is provided (as a symlink to the "standard" # one), but the type is not actually included in the std::tr1 namespace at all. 
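#
# The HAVE_* results from these checks are typically consumed with something
# like the following (an illustrative sketch only, not this package's own
# header selection logic, and Key/Value are placeholder types):
#
#   #if defined(HAVE_UNORDERED_MAP)
#    #include <unordered_map>
#    typedef std::unordered_map<Key,Value>       map_type;
#   #elif defined(HAVE_TR1_UNORDERED_MAP)
#    #include <tr1/unordered_map>
#    typedef std::tr1::unordered_map<Key,Value>  map_type;
#   #elif defined(HAVE_EXT_HASH_MAP)
#    #include <ext/hash_map>
#    typedef __gnu_cxx::hash_map<Key,Value>      map_type;
#   #endif
#
# (subject to the FreeBSD/clang caveat noted above, where the tr1/ header may
# exist without the type actually being present in std::tr1).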
# ---------------------------------------------------------------------------- AC_DEFUN([ACM_TR1_UNORDERED_MAP], [ AC_CHECK_HEADERS([unordered_map],[], [AC_CHECK_HEADERS([tr1/unordered_map],[], [AC_CHECK_HEADERS([ext/hash_map])])]) ]) # ACM_TR1_UNORDERED_SET # # Check what support we have for a hashed set type. We should almost never # need to fall back as far as the __gnu_cxx::hash_set anymore, but we are # still at that awkward stage in the transition between the tr1 implementation # and the standardised in C++11 one. For bonus fun, in some cases, like clang # in FreeBSD 10.3, the tr1 header is provided (as a symlink to the "standard" # one), but the type is not actually included in the std::tr1 namespace at all. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_TR1_UNORDERED_SET], [ AC_CHECK_HEADERS([unordered_set],[], [AC_CHECK_HEADERS([tr1/unordered_set],[], [AC_CHECK_HEADERS([ext/hash_set])])]) ]) # ACM_TR1_HASH # # Check which hash type we need to use for unordered_map and unordered_set. # For the hash template type in the the header <{tr1/,}functional> we need to # do a bit more work because the header won't just error out in # the same way as unordered_{map,set} do above when the C++ standard in use is # too early. It will just silently not define the hash type we need. So check # that it is actually available explicitly. # ---------------------------------------------------------------------------- AC_DEFUN([ACM_TR1_HASH], [ AC_CACHE_CHECK([for std::hash],[mu_cv_type_std_hash], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], [[using std::hash;]]) ], [mu_cv_type_std_hash=yes], [mu_cv_type_std_hash=no] ) ]) AS_IF([test "$mu_cv_type_std_hash" = yes], [AC_DEFINE([HAVE_STD_HASH],[1], [The system provides std::hash in the header ])], [AC_CACHE_CHECK([for std::tr1::hash],[mu_cv_type_tr1_hash], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], [[using std::tr1::hash;]]) ], [mu_cv_type_tr1_hash=yes], [mu_cv_type_tr1_hash=no] ) ]) ]) AS_IF([test "$mu_cv_type_tr1_hash" = yes], [AC_DEFINE([HAVE_TR1_HASH],[1], [The system provides std::tr1::hash in the header ]) ]) ]) bit-babbler-0.9/Makeup/config/0002755000000000000000000000000014136173163013113 5ustar bit-babbler-0.9/Makeup/config/Package.conf0000644000000000000000000000106114136173163015311 0ustar PACKAGE_NAME = bit-babbler PACKAGE_VERSION = 0.9 PACKAGE_MAINTAINER = ron@debian.org PACKAGE_CXXSTD = -std=gnu++98 PACKAGE_TESTS = configure.stdtools configure.i18n \ configure.udev configure.systemd configure.sysctl PACKAGE_M4 = pthread.m4 tr1.m4 PACKAGE_CONFIG_HEADER = setup.h PACKAGE_DIST_TYPE = git PACKAGE_DIST_COMPRESS = gz PACKAGE_BUILD_ROOTCMD = fakeroot PACKAGE_INSTALL_ROOTCMD = sudo PACKAGE_TARGETS = seedd seedd-conf seedd-service udev-rules sysctl-conf \ bbctl bbcheck bbvirt vm-conf munin-script munin-conf man1 bit-babbler-0.9/Makeup/config/acfile.60-bit-babbler.rules0000644000000000000000000002110414136173163017775 0ustar # udev rules for bit-babbler devices. SUBSYSTEM!="usb", GOTO="bb_end" ACTION!="add|change|bind", GOTO="bb_end_add" # This is what we'd like to do. Skip all the rules here if Vendor:Product # is not 0403:7840 -- but that's not what these two tests will actually do # (at least with udev versions up to 232-25 which shipped in Stretch). # If the device we're handling an event for doesn't have the idVendor or # idProduct attributes at all, then these tests are still false, the same # as if they did have the values which we are testing for ... 
#ATTR{idVendor}!="0403", GOTO="bb_end" #ATTR{idProduct}!="7840", GOTO="bb_end" # So instead, we need to explicitly test that they *are* the values we are # looking for, and play goto leapfrog to get the control flow logic we want. ATTR{idVendor}=="0403", ATTR{idProduct}=="7840", GOTO="bb_add" GOTO="bb_end" LABEL="bb_add" # Create a symlink to a well known name that can be used for cgroup_device_acl # configuration in /etc/libvirt/qemu.conf, and for other similar purposes too. # We need to include this rule for bind events too, otherwise udev (as of the # 237 release at least) will remove the link that it created with the add event # when the bind event is processed. SYMLINK+="bitbabbler/$attr{serial}" # Don't run the rest of this for bind events, because normally they'll just # come immediately after the add event for these devices, and we don't need # to repeat all of this twice every time a device is added. ACTION!="add|change", GOTO="bb_end_add" # If the system group 'bit-babbler' exists, give users in it direct access to # the device. Otherwise only root will have access to the hardware itself. # # If that group doesn't exist, then by default udev will just ignore the rule # to set GROUP, and the call to setfacl will be a no-op, aside from reporting # those failures to syslog. If you don't want to add that group and enable # this feature, but do want to avoid the errors in the logs, the test below # can be enabled to skip them, but normally it's just a waste of cycles, and # one more thing that could go wrong on some system during early boot. # # We won't trust udev to negate this test either, so play leapfrog again. #PROGRAM="/usr/bin/getent group bit-babbler", GOTO="bb_set_group" #GOTO="bb_end_set_group" LABEL="bb_set_group" # Allow users in group bit-babbler to access the device directly. # If ACLs are supported, grant users in the bit-babbler group access to the device # with them too. This is mainly so that if a VM is halted, the device will revert # to normal access from the host system again. The libvirt 'managed' mode will not # restore the original ownership when it releases the device, it will just make it # be root:root, stomping the GROUP we set above. GROUP="bit-babbler" TEST=="/usr/bin/setfacl", RUN+="/usr/bin/setfacl -m g:bit-babbler:rw $devnode" LABEL="bb_end_set_group" # Limit access to owner and group, and run the bbvirt script to see if # this device was configured for hotplugging into a virtual machine. # # We now also only do this if the libvirt socket actually exists, which is an # imperfect workaround for a systemd race-to-deadlock introduced in Bullseye # by the libvirt package adding a "socket activation" unit instead of just # letting the daemon create and manage its own socket ... # # The boot-time race works like this: # - udev-trigger.service gets us here with an add event for BitBabbler devices # that are already in the machine being booted. We call `bbvirt attach` with # the device details which in turn does `virsh attach` if the device has been # assigned to a VM. # # - On a 'fast' machine with enough cores to handle the boot thundering herd, # or when startup dependencies are correctly declared and ordered, then the # libvirt socket will not have been created yet, this operation will fail # gracefully, and the QEMU hook will attach the device later when the VM it # is assigned to is actually started. # # If that takes a bit too long for any reason, then sloppy boot dependencies # may allow the libvirt socket to already be present. 
Prior to Bullseye (and # it being created by a .socket unit with no guarantee that there will ever # be a consumer for the socket), this would mean that the libvirt daemon was # running, and it would either perform our request or fail gracefully if the # VM itself had not been started yet (but that would not happen in practice # as its .service dependencies are not yet satisfied when this first runs, # and are typically on the other side of at least one ordering barrier). # # But in Bullseye with the libvirtd.socket being blindly created "as early as # possible" with no regard for if or when the daemon will actually be there # to handle requests, virsh connects to a zombie socket and then just waits # 'forever' for a response. Accessing the socket makes systemd request the # daemon service to start, but it cannot as its own dependencies are not yet # satisfied (and are unlikely to be in the boot stage where this first runs). # # - Meanwhile, in another part of the thundering herd, someone unrelated to us # calls udevadm settle to allow its initial set of expected devices to become # available if they are already in the queue, and the boot cannot proceed to # the next target until that has completed. The ifupdown-pre.service does # this, but any other early boot step could also reasonably do so. # # However our event is in that queue, and virsh will not stop hanging on the # zombie socket until it gets a response to shouting into the void. And it # will never get a response because the daemon which would give it cannot be # started until after the prerequisites scheduled after this deadlock are # satisfied. # # The boot hangs until systemd's "bug solver" simply times it all out, and # kills everything leaving the admin to Know His Own. Much sadness ensues # as the admin is now locked out of the remote machine because the network # services were an innocent victim of a deadlock black hole just waiting # for things to fall in it ... # # Testing for the socket here narrows the window in which the race can be lost # but cannot totally eliminate it. To properly fix this, the libvirt socket # creation needs to be deferred until there is a libvirtd process to handle # request to it. And separately to that the networking.service needs to not # refuse to start if ifupdown-pre.service "fails", because udevadm settle can # fail or deadlock or time out for many reasons not related to the network # devices not actually being there, so making a best effort to still start # them cannot be the wrong thing to do ... # # Ideally systemd should handle the transitive dependencies that the .socket # inherits which are required before the .service can start, but apparently # suggesting that is a heresy to the systemd cult who simply refuse to hear # it, still pretending that "ordering will sort itself out if you just start # all the sockets as early as possible", despite repeated reports of the real # deadlocks that result from sloppy handling of known ordering prerequisites. # So as usual, we'll just work around them until the problem actually happens # to them in a way they can't handwave off and see what else happens then ... MODE="0660", TEST=="@LIBVIRT_SOCKET@", \ RUN+="@EXP_BINDIR@/bbvirt --syslog attach $attr{serial} --busnum $attr{busnum} --devnum $attr{devnum}" # Enable USB autosuspend. The BitBabbler devices support suspending correctly, # though not every controller they might be plugged into will always play nicely. 
# It should be safe to enable it here, even if an upstream hub or controller # needs it disabled. The XHCI controllers seem to be the most troublesome, but # mostly with older kernels. TEST=="power/control", ATTR{power/control}="auto" TEST=="power/autosuspend_delay_ms", ATTR{power/autosuspend_delay_ms}="2000" LABEL="bb_end_add" ACTION!="remove", GOTO="bb_end" # Explicitly detach unplugged devices from the VM if they were passed through to it. # If we don't do this, the stale configuration will remain, and could # match some other completely different device that is plugged in later ... # This is why we can't make persistent changes to the domain definition for VMs that # aren't running when the device is plugged in, because if the host goes down without # this rule being run, we'd never clean those up. # # We can't test against the attributes here, if this would match they are already gone. ENV{ID_VENDOR_ID}=="0403", ENV{ID_MODEL_ID}=="7840", TEST=="@LIBVIRT_SOCKET@", \ RUN+="@EXP_BINDIR@/bbvirt --syslog detach $env{ID_SERIAL_SHORT} --busnum $env{BUSNUM} --devnum $env{DEVNUM}" LABEL="bb_end" bit-babbler-0.9/Makeup/config/acfile.bit-babbler-sysctl.conf0000644000000000000000000000036214136173163020667 0ustar # sysctl options for BitBabbler # These settings will be automatically applied at boot, or they can be # manually applied immediately with: # # sysctl -q -p @SYSCTL_DIR@/bit-babbler-sysctl.conf kernel.random.write_wakeup_threshold = 2048 bit-babbler-0.9/Makeup/config/acfile.seedd-wait.service0000644000000000000000000001061114136173163017742 0ustar # systemd sequence point for services requiring good initial seed entropy. [Unit] Description=Wait for initial kernel entropy seeding Documentation=man:seedd(1) DefaultDependencies=no After=seedd.service # Ordinarily, we want to block everything which might run after local-fs.target # until either we have good seed entropy, or know that we definitely won't be # getting it from seedd, or we time-out and give up waiting for it. But if this # (or anything else) failing lands us at the emergency.target, then systemd may # already consider the local-fs.target has been reached, so if people try to # enter a normal system mode again with `systemctl default` as it prompts them # to, then it won't block here anymore, and will start everything else up as if # this succeeded. But if this still fails then when the timeout expires, they # will suddenly and without explanation, be thrown back into emergency mode # again. Unless they did something like ssh in during that window, in which # case they'll unlock the achievement of being in single-user mode while being # logged in with multiple users simultaneously. # # So to avoid the cognitive dissonance of seeing that they have both tea and # no-tea, we need to set up a second roadblock at sysinit.target, which should # prevent starting most things which the emergency.target didn't itself start. # The only nasty part then is that syslog is disabled by emergency.target, so # it may be tricky to discover why they keep being thrown back into it, but # there's not a whole lot we can do here to solve that quirk of systemd. Before=local-fs.target sysinit.target # In theory this should probably be Requires=, since this will fail if seedd # is not running (though strictly speaking, an instance of seedd that is not # managed by systemd would still suffice) - but the main reason not to use a # Requires dependency here is so that this will not automatically be restarted # any time that seedd.service is. 
This really only needs to run once at boot, # and if there are other units which do block hard on this one with a Requires # dependency of their own, the restart would cascade all the way down through # those too - and they almost certainly should not be restarted (or stopped # completely!) just because seedd was. On the off-chance this is the desired # behaviour for some use case, it is still possible to edit this unit, or use # a drop-in to upgrade this relationship to Requires - but you should remember # that seedd will be automatically restarted if the package is upgraded, so it # would be unwise for that to trigger a restart of anything which would be Bad # if it happens in the middle of a dist-upgrade or similar. Wants=seedd.service # There should be no reason to ever start this manually, it only exists to # provide a sequence point during early boot, and a check that the kernel was # seeded with good entropy for things which require that. So starting it # manually after boot is too late for the former, and useless for the latter, # since other units need to bring it in themselves if they Want or Require it. # Likewise, as a oneshot, there is no reason to ever stop this manually, and # that could in fact be actively "harmful", since it would also stop any unit # which did Require it, even though what they actually require hasn't changed. RefuseManualStart=yes RefuseManualStop=yes # Another option for maximally paranoid systems would be something like the # following, which would put the system into single-user mode if we were unable # to seed the kernel sufficiently at boot. But you could also do something a # bit less aggressive in the same way, starting only a limited set of emergency # services (but more than just a single-user login) in that case. #OnFailure=emergency.target #OnFailureJobMode=replace-irreversibly [Service] Type=oneshot RemainAfterExit=yes # Wait for at least one QA checked block of bits to seed the OS kernel pool, # polling for that 4 times/sec, and reporting failure if it could not be done # in less than 30 seconds. Output enough verbosity to show in the system log # what we are doing and when it happens. ExecStart=@EXP_BINDIR@/bbctl -v --waitfor Kernel:2500:250:30k # Belt and braces, have systemd fail it if there was no result in 45 seconds. # We want to limit the worst case of preventing at least a minimal boot # proceeding to give admin access if something really went Terribly Wrong. TimeoutStartSec=45 [Install] WantedBy=seedd.service bit-babbler-0.9/Makeup/config/acfile.seedd.service0000644000000000000000000000074214136173163017004 0ustar # systemd unit for the seedd daemon. [Unit] Description=BitBabbler entropy source daemon Documentation=man:seedd(1) DefaultDependencies=no After=systemd-remount-fs.service Before=local-fs.target shutdown.target Conflicts=shutdown.target ConditionPathExists=/etc/bit-babbler/seedd.conf [Service] Type=notify ExecStart=@EXP_BINDIR@/seedd --config /etc/bit-babbler/seedd.conf KillMode=mixed CapabilityBoundingSet=CAP_CHOWN CAP_FOWNER CAP_SYS_ADMIN [Install] WantedBy=sysinit.target bit-babbler-0.9/Makeup/config/acsubst.bit-babbler0000644000000000000000000000016014136173163016641 0ustar include Makefile.acsubst.udev USB_CPPFLAGS = @USB_CPPFLAGS@ USB_LDFLAGS = @USB_LDFLAGS@ USB_LIBS = @USB_LIBS@ bit-babbler-0.9/Makeup/config/configure.bit-babbler0000644000000000000000000002350614136173163017167 0ustar dnl Makeup extra configuration for bit-babbler. dnl dnl Copyright 2003 - 2021, Ron Lee. 
dnl AC_LANG_PUSH([C++]) case $host in *-*-linux* ) ;; *-*-cygwin* | *-*-mingw32* ) dnl We don't have unix domain sockets on windows, so default to TCP there. AS_IF([test -z "$SEEDD_CONTROL_SOCKET"], [SEEDD_CONTROL_SOCKET=tcp:localhost:56789]) dnl We need at least 0x0600 to get AI_ADDRCONFIG for getaddrinfo bb_cv_env_winver=0x0600 bb_cv_env__win32_winnt=0x0600 AC_DEFINE_UNQUOTED([WINVER], [$bb_cv_env_winver], [Select the MSW version to be compatible with]) AC_DEFINE_UNQUOTED([_WIN32_WINNT], [$bb_cv_env__win32_winnt], [The MSW NT version to be compatible with]) AC_MSG_NOTICE([using WINVER = '$bb_cv_env_winver', _WIN32_WINNT = '$bb_cv_env__win32_winnt']) dnl Testing for vasprintf has the opposite problem to what localtime_r does dnl as described below. It's included in the system library, so a link test dnl passes with a faked prototype, but an actual build fails because it is dnl not visible in stdio.h unless _GNU_SOURCE is defined. AC_DEFINE([_GNU_SOURCE],[1],[Include support for vasprintf et al.]) ;; *-*-openbsd* ) dnl The default pthread stack size on OpenBSD 6.1 is 512kB, so fix that. AS_IF([test -z "$THREAD_STACK_SIZE"],[THREAD_STACK_SIZE=8192]) AC_DEFINE([HAVE_BROKEN_STDIO_LOCKING],[1], [Workaround OpenBSD _thread_flockfile cancellation bug]) ;; *-*-freebsd* ) dnl The default pthread stack size on FreeBSD 11 is 2MB, so fix that. dnl So far we haven't actually had this smash the stack there with dnl the default size (unlike OpenBSD, MacOS and Windows), but let's dnl not wait until we do, just use the same size as everywhere else. AS_IF([test -z "$THREAD_STACK_SIZE"],[THREAD_STACK_SIZE=8192]) ;; *-*-darwin* ) dnl The default pthread stack size on MacOS is only 512kB, and we expect to dnl need more than that, so bring it into line with the normal Linux default. AS_IF([test -z "$THREAD_STACK_SIZE"],[THREAD_STACK_SIZE=8192]) ;; esac dnl /var could be a remote mount which isn't available at early boot when seedd dnl is first started, but /run is supposed to be ready before any ordinary early dnl boot process, even if it is a separate mount like a tmpfs, so default to it dnl unless we know it's not expected to be supported. FHS 3.0 allows /var/run dnl to be an alias to /run, and that is what most (but not all) Linux distros dnl currently do. The BSDs (aside from Debian's kFreeBSD port) aren't riding dnl this train yet though, so we still use /var/run there instead of rudely dnl creating a new directory in the root of people's systems. Aside from the dnl override for Windows above, we use SYSTEM_RUNDIR from configure.stdtools to dnl decide which to use here. AS_IF([test -z "$SEEDD_CONTROL_SOCKET"], [SEEDD_CONTROL_SOCKET="$SYSTEM_RUNDIR/bit-babbler/seedd.socket"]) AC_ARG_VAR([SEEDD_CONTROL_SOCKET], [Set the default to use for the seedd control socket]) AS_IF([test -n "$SEEDD_CONTROL_SOCKET"],[ AC_DEFINE_UNQUOTED([SEEDD_CONTROL_SOCKET],["$SEEDD_CONTROL_SOCKET"], [Set the default to use for the seedd control socket]) ]) AC_ARG_VAR([THREAD_STACK_SIZE], [Explicitly set the per-thread stack size in kB (if non-zero)]) AS_IF([test -n "$THREAD_STACK_SIZE"],[ AC_DEFINE_UNQUOTED([THREAD_STACK_SIZE],[$THREAD_STACK_SIZE], [Explicitly set the per-thread stack size in kB (if non-zero)]) ]) AC_C_BIGENDIAN AC_CHECK_FUNCS([vasprintf]) dnl See if clock_gettime is available even without librt. On some systems dnl it is, and as of glibc 2.17 the clock_* functions moved from librt to dnl the main C library as well. 
We don't particularly want to depend upon dnl librt, for lots of reasons, but using this if we do have it is sane. dnl dnl Testing for localtime_r and gmtime_r in this way fails on mingw-w64 4.9.2 dnl even though it does actually have them when either of _POSIX_C_SOURCE or dnl _POSIX_THREAD_SAFE_FUNCTIONS are defined - because AC_CHECK_FUNCS tries to dnl link a faked prototype, but in the mingw time.h they are inline functions dnl only, wrapping the system localtime_s function. We could do a more complex dnl test here, but it's probably ok to just let them fall back to using the dnl non-reentrant versions, because on MSW, localtime and gmtime are in theory dnl implemented using thread-local storage, so they are thread-safe anyway. AC_CHECK_FUNCS([gettimeofday localtime_r gmtime_r timegm clock_gettime]) dnl Check if SIGRTMIN is available. MacOS 10.12.1 still doesn't have it dnl (though FreeBSD added support for it in version 7). AC_CHECK_DECLS([SIGRTMIN],[],[],[[#include ]]) dnl OpenBSD as of at least 6.1 doesn't provide this (though FreeBSD and MacOS dnl at least do) and it isn't required by POSIX.1-2008 (SuSv4 TC2 2016). AC_CHECK_DECLS([LOG_MAKEPRI],[],[],[[#include ]]) ACM_CXX_FORCED_UNWIND ACM_FUNC_PTHREAD_SETNAME ACM_TR1_UNORDERED_MAP ACM_CPP_PUSH_POP_DIAGNOSTIC_MACROS ACM_DEFINE_FUNCTION_ATTRIBUTE([noreturn],[BB_NORETURN]) ACM_DEFINE_FUNCTION_ATTRIBUTE([const], [BB_CONST]) ACM_DEFINE_FUNCTION_ATTRIBUTE([pure], [BB_PURE]) ACM_DEFINE_FUNCTION_ATTRIBUTE([cold], [BB_COLD]) ACM_DEFINE_FUNCTION_ATTRIBUTE([no_sanitize("float-divide-by-zero")], [BB_NO_SANITIZE_FLOAT_DIVIDE_BY_ZERO]) ACM_DEFINE_FUNCTION_ATTRIBUTE([no_sanitize("unsigned-integer-overflow")], [BB_NO_SANITIZE_UNSIGNED_INTEGER_OVERFLOW]) ACM_DEFINE_PRINTF_FORMAT_ATTRIBUTE([BB_PRINTF_FORMAT]) ACM_DEFINE_STRFTIME_FORMAT_ATTRIBUTE([BB_STRFTIME_FORMAT]) ACM_DEFINE_STATEMENT_ATTRIBUTE([fallthrough],[BB_FALLTHROUGH],[],[],[do {} while(0)]) dnl The FreeBSD libusb3 provides compatibility for libusb-0.1, libusb-1.0 and dnl their own libusb2 interface, with libusb.so as the -dev link to it. dnl On Linux libusb.so usually points to libusb-0.1, which we don't want. dnl The header file is usually in $prefix/include/libusb-1.0 on linux, and in dnl the system include dir on FreeBSD. AC_ARG_VAR([LIBUSB_DIR], [Path for libusb (mostly for cross-compiling)]) AC_ARG_VAR([USB_CPPFLAGS], [Extra CPPFLAGS for libusb (mostly for cross-compiling)]) AC_ARG_VAR([USB_LDFLAGS], [Extra LDFLAGS for libusb (mostly for cross-compiling)]) AS_IF([test -n "$LIBUSB_DIR"],[ USB_CPPFLAGS="-I$LIBUSB_DIR/include $USB_CPPFLAGS" USB_LDFLAGS="-L$LIBUSB_DIR/lib $USB_LDFLAGS" ]) ACM_PUSH_VAL([$0],[CPPFLAGS],[$USB_CPPFLAGS])dnl AC_CHECK_HEADERS([libusb-1.0/libusb.h libusb.h],[break]) AS_IF([test "$ac_cv_header_libusb_1_0_libusb_h" = "yes"],[ libusb_header="" ],[test "$ac_cv_header_libusb_h" = "yes"],[ libusb_header="" ]) AS_IF([test -n "$libusb_header"],[ AC_DEFINE_UNQUOTED([LIBUSB_HEADER],[$libusb_header],[libusb header location]) ]) ACM_POP_VAR([$0],[CPPFLAGS])dnl ACM_PUSH_VAL([$0],[LDFLAGS],[$USB_LDFLAGS])dnl ACM_PUSH_VAR([$0],[LIBS])dnl AC_SEARCH_LIBS([libusb_init], [usb-1.0 usb], [ AC_DEFINE([HAVE_LIBUSB],[1],[libusb is available]) AS_CASE([$ac_cv_search_libusb_init], [-l*],[[USB_LIBS=${ac_cv_search_libusb_init#-l}]]) dnl We need the double quoting in the case above or the # in dnl the prefix removal breaks the macro expansion horribly. 
],[ AC_MSG_WARN([libusb not found]) case $host in *-*-openbsd* ) ACM_ADD_MISSING_DEP([libusb1-1.0]) ;; *-*-kfreebsd* ) ACM_ADD_MISSING_DEP([libusb2-dev]) ;; * ) ACM_ADD_MISSING_DEP([libusb-1.0-0-dev]) ;; esac ]) dnl We need to test for these explicitly, because LIBUSB_API_VERSION won't do. dnl It wasn't bumped when libusb_strerror was added, and it just doesn't exist dnl at all in the FreeBSD libusb3, which does provide libusb_get_port_numbers dnl even though it currently just returns LIBUSB_ERROR_NOT_SUPPORTED ... dnl And that kids, is why we have autoconf. dnl As of FreeBSD 11, we also need to test for libusb_has_capability, since it dnl appears they added the hotplug support API and bumped the compatibility dnl version, but didn't actually add the capability test function ... AC_CHECK_FUNCS([libusb_strerror libusb_get_port_numbers libusb_has_capability]) ACM_POP_VAR([$0],[LIBS,LDFLAGS])dnl AC_SUBST([USB_LIBS]) AC_LANG_POP([C++]) ACM_CHECKPOINT_MISSING_DEPS dnl We defer failing out on this until here, because the most likely reason is dnl just that libusb itself is missing, which will be reported above, and that dnl is a more useful explaination to give if so. This should only trigger if dnl the lib is installed but the header is in some wacky place we didn't look. AS_IF([test -z "$libusb_header"],[ AC_MSG_ERROR([No libusb header file found]) ]) dnl We need this one in the udev rules to work around more systemd dumb-fuckery dnl that its cult members refuse to recognise the reality of yet. It seems a dnl dangerous value to hard-code, but libvirt essentially hard-codes it too ... dnl Hopefully the real problem will be addressed in a release or two's time and dnl then we can get rid of this workaround again. AC_ARG_VAR([LIBVIRT_SOCKET], [Path to the libvirtd unix control socket]) LIBVIRT_SOCKET="$SYSTEM_RUNDIR/libvirt/libvirt-sock" AC_MSG_NOTICE([Configured bit-babbler $PACKAGE_VERSION]) AC_MSG_NOTICE([ with udev: $mu_cv_with_udev]) AC_MSG_NOTICE([ SEEDD_CONTROL_SOCKET: $SEEDD_CONTROL_SOCKET]) AC_MSG_NOTICE([ LIBVIRT_SOCKET: $LIBVIRT_SOCKET]) AS_IF([test -n "$THREAD_STACK_SIZE"],[ AC_MSG_NOTICE([ THREAD_STACK_SIZE: $THREAD_STACK_SIZE]) ]) case $host in *-*-openbsd* ) AC_MSG_NOTICE([NOTE: On OpenBSD you will need to build this by using gmake,]) AC_MSG_NOTICE([ and you will need to have the bash package installed.]) ;; *-*-freebsd* ) AC_MSG_NOTICE([NOTE: On FreeBSD you will need to build this by using gmake,]) AC_MSG_NOTICE([ and you will need to have the bash package installed.]) ;; esac AC_CONFIG_FILES([munin/bit_babbler],[chmod +x munin/bit_babbler]) bit-babbler-0.9/Makeup/config/target.bbcheck0000644000000000000000000000060514136173163015703 0ustar include Makefile.acsubst.bit-babbler bbcheck_TYPE = EXECUTABLE bbcheck_LANGUAGE = C++ bbcheck_OBJS = bbcheck.o bbcheck_VPATHS = %.cpp,$(srcdir)/src bbcheck_INSTALLDIR = $(bindir) bbcheck_CPPFLAGS = $(PTHREAD_CPPFLAGS) -I$(srcdir)/include $(USB_CPPFLAGS) bbcheck_LDFLAGS = $(PTHREAD_LDFLAGS) $(USB_LDFLAGS) bbcheck_LIBS = $(USB_LIBS) $(UDEV_LIBS) bbcheck_PE_LDFLAGS = -Wl,--stack,8388608 bit-babbler-0.9/Makeup/config/target.bbctl0000644000000000000000000000103014136173163015401 0ustar bbctl_TYPE = EXECUTABLE bbctl_LANGUAGE = C++ bbctl_OBJS = bbctl.o bbctl_VPATHS = %.cpp,$(srcdir)/src bbctl_INSTALLDIR = $(bindir) bbctl_CPPFLAGS = -I$(srcdir)/include # We need msvcr110 for _create_locale in json.h # And we need to explicitly list any system libs that would otherwise use # symbols from msvcrt, else we'll end up linked to two crt libraries which # 
isn't likely to end well. CRT110_LIBS = stdc++ winpthread mingwex mingw32 msvcr110 bbctl_PE_LDFLAGS = -Wl,--stack,8388608 bbctl_PE_LIBS = wsock32 ws2_32 $(CRT110_LIBS) bit-babbler-0.9/Makeup/config/target.bbvirt0000644000000000000000000000023314136173163015607 0ustar bbvirt_TYPE = DATA bbvirt_DATA_SRCDIR = $(srcdir)/libvirt bbvirt_DATA_INSTALLDIR = $(bindir) bbvirt_DATA_FILES = bbvirt INSTALL_DATA = $(INSTALL_PROGRAM) bit-babbler-0.9/Makeup/config/target.man10000644000000000000000000000022114136173163015150 0ustar man1_TYPE = DATA man1_DATA_SRCDIR = $(srcdir)/doc/man man1_DATA_INSTALLDIR = $(mandir)/man1 man1_DATA_FILES = seedd.1 bbcheck.1 bbctl.1 bbvirt.1 bit-babbler-0.9/Makeup/config/target.munin-conf0000644000000000000000000000023214136173163016367 0ustar munin-conf_TYPE = DATA munin-conf_DATA_SRCDIR = $(srcdir)/munin munin-conf_DATA_INSTALLDIR = /etc/munin/plugin-conf.d munin-conf_DATA_FILES = bit-babbler bit-babbler-0.9/Makeup/config/target.munin-script0000644000000000000000000000206714136173163016756 0ustar munin-script_TYPE = DATA munin-script_DATA_SRCDIR = $(top_builddir)/munin munin-script_DATA_INSTALLDIR = $(datadir)/munin/plugins munin-script_DATA_FILES = bit_babbler INSTALL_DATA = $(INSTALL_PROGRAM) # Regenerate the plugin script if config.status or its source file changed. $(top_builddir)/munin/bit_babbler: $(srcdir)/munin/bit_babbler.in $(top_builddir)/config.status @echo -n " * Updating $@... " @cd $(top_builddir) && ./config.status $@ # This one is a little unconventional. We don't usually remove files created # by config.status in the normal clean target, but this one actually is a build # target, not a part of the build system, and if we don't remove it here, then # nothing else would either, aside from completely removing a build subdir that # it was generated in. So nothing would clean it if it's configured in-tree. do_clean_munin-script: @echo " Removing: munin/bit_babbler" @$(RM) "$(top_builddir)/munin/bit_babbler" @rmdir $(top_builddir)/munin 2>/dev/null || true clean_munin-script: do_clean_munin-script .PHONY: do_clean_munin-script bit-babbler-0.9/Makeup/config/target.seedd0000644000000000000000000000105714136173163015410 0ustar include Makefile.acsubst.bit-babbler seedd_TYPE = EXECUTABLE seedd_LANGUAGE = C++ seedd_OBJS = seedd.o seedd_VPATHS = %.cpp,$(srcdir)/src %.c,$(srcdir)/src seedd_INSTALLDIR = $(bindir) seedd_CPPFLAGS = $(PTHREAD_CPPFLAGS) -I$(srcdir)/include $(USB_CPPFLAGS) seedd_LDFLAGS = $(PTHREAD_LDFLAGS) $(USB_LDFLAGS) seedd_LIBS = $(USB_LIBS) $(UDEV_LIBS) # See the comment in target.bbctl for why we need CRT110_LIBS here. 
CRT110_LIBS = stdc++ winpthread mingwex mingw32 msvcr110 seedd_PE_LDFLAGS = -Wl,--stack,8388608 seedd_PE_LIBS = wsock32 ws2_32 $(CRT110_LIBS) bit-babbler-0.9/Makeup/config/target.seedd-conf0000644000000000000000000000022614136173163016330 0ustar seedd-conf_TYPE = DATA seedd-conf_DATA_SRCDIR = $(srcdir)/doc/config seedd-conf_DATA_INSTALLDIR = /etc/bit-babbler seedd-conf_DATA_FILES = seedd.conf bit-babbler-0.9/Makeup/config/target.seedd-service0000644000000000000000000000033014136173163017037 0ustar include Makefile.acsubst.systemd seedd-service_TYPE = DATA seedd-service_DATA_SRCDIR = $(top_builddir) seedd-service_DATA_INSTALLDIR = $(SYSTEMD_UNIT_DIR) seedd-service_DATA_FILES = seedd.service seedd-wait.service bit-babbler-0.9/Makeup/config/target.sysctl-conf0000644000000000000000000000030114136173163016557 0ustar include Makefile.acsubst.sysctl sysctl-conf_TYPE = DATA sysctl-conf_DATA_SRCDIR = $(top_builddir) sysctl-conf_DATA_INSTALLDIR = $(SYSCTL_DIR) sysctl-conf_DATA_FILES = bit-babbler-sysctl.conf bit-babbler-0.9/Makeup/config/target.udev-rules0000644000000000000000000000027414136173163016417 0ustar include Makefile.acsubst.udev udev-rules_TYPE = DATA udev-rules_DATA_SRCDIR = $(top_builddir) udev-rules_DATA_INSTALLDIR = $(UDEV_RULES_DIR) udev-rules_DATA_FILES = 60-bit-babbler.rules bit-babbler-0.9/Makeup/config/target.vm-conf0000644000000000000000000000020414136173163015662 0ustar vm-conf_TYPE = DATA vm-conf_DATA_SRCDIR = $(srcdir)/libvirt vm-conf_DATA_INSTALLDIR = /etc/bit-babbler vm-conf_DATA_FILES = vm.conf bit-babbler-0.9/Makeup/gmake-fragments/0002755000000000000000000000000014136173163014716 5ustar bit-babbler-0.9/Makeup/gmake-fragments/makefile.acsubst0000644000000000000000000000436214125243667020071 0ustar # @configure_input@ # # Cross Platform makefile boilerplate. # # Copyright 1999 - 2018, Ron Lee # # This file is distributed under the terms of the GNU GPL version 2. # # As a special exception to the GPL, it may be distributed without # modification as a part of a program using a makeup generated build # system, under the same distribution terms as the program itself. MAKEUP_TOP_CONFIG = $(top_srcdir)/Makeup/Makeup.conf MAKEUP_HOST_ARCH = @MAKEUP_HOST_ARCH@ MAKEUP_DEFAULT_LINKAGE = @MAKEUP_DEFAULT_LINKAGE@ MAKEUP_PLATFORM_HEADER = @MAKEUP_PLATFORM_HEADER@ MAKEUP_FLAVOUR_HEADER = @MAKEUP_FLAVOUR_HEADER@ # Install locations. DESTDIR = srcdir = @srcdir@ top_srcdir = @top_srcdir@ builddir = @builddir@ top_builddir = @top_builddir@ # autoconf 2.57 still does not set top_builddir if == builddir. 
ifeq ($(strip $(top_builddir)),) top_builddir = @builddir@ endif prefix = @prefix@ exec_prefix = @exec_prefix@ bindir = @bindir@ sbindir = @sbindir@ libdir = @libdir@ includedir = @includedir@ datarootdir = @datarootdir@ datadir = @datadir@ mandir = @mandir@ subdirs = @subdirs@ SYSTEM_RUNDIR = @SYSTEM_RUNDIR@ EXEEXT = @EXEEXT@ DSOEXT = @DSOEXT@ CC = @CC@ CXX = @CXX@ CPP = @CPP@ CXXCPP = @CXXCPP@ INSTALL = @INSTALL@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_DATA = @INSTALL_DATA@ LN_S = @LN_S@ AR = @AR@ RANLIB = @RANLIB@ LEX = @LEX@ YACC = @YACC@ WINDRES = @WINDRES@ WINRCFLAGS = @WINRCFLAGS@ ARFLAGS = @ARFLAGS@ YACCFLAGS = @YACCFLAGS@ LEXFLAGS = @LEXFLAGS@ XGETTEXT = @XGETTEXT@ XGETTEXT_ARGS = @XGETTEXT_ARGS@ MSGINIT = @MSGINIT@ MSGMERGE = @MSGMERGE@ MSGFMT = @MSGFMT@ ALL_LINGUAS = @ALL_LINGUAS@ GETTEXT_MSG_SRC = @GETTEXT_MSG_SRC@ LIBINTL = @LIBINTL@ LIBICONV = @LIBICONV@ LCOV = @LCOV@ GENHTML = @GENHTML@ CPPFLAGS = @CPPFLAGS@ CFLAGS = @CFLAGS@ CXXFLAGS = @CXXFLAGS@ LDFLAGS = @LDFLAGS@ HOST_PICFLAGS = @HOST_PICFLAGS@ PICFLAGS = @PICFLAGS@ LIBS = @LIBS@ PTHREAD_CPPFLAGS = @PTHREAD_CPPFLAGS@ PTHREAD_LDFLAGS = @PTHREAD_LDFLAGS@ EXTRACPPFLAGS = @EXTRACPPFLAGS@ EXTRACFLAGS = @EXTRACFLAGS@ EXTRACXXFLAGS = @EXTRACXXFLAGS@ EXTRALDFLAGS = @EXTRALDFLAGS@ EXTRAYACCFLAGS = @EXTRAYACCFLAGS@ EXTRALEXFLAGS = @EXTRALEXFLAGS@ EXTRALIBS = @EXTRALIBS@ bit-babbler-0.9/Makeup/gmake-fragments/makefile.fstools0000644000000000000000000005013414136173163020107 0ustar # makeup filesystem tools. # # This file contains commonly reusable makefile portions for # performing operations on the file system in a consistent manner. # # Copyright 2004 - 2017, Ron # # This file is distributed under the terms of the GNU GPL version 2. # # As a special exception to the GPL, it may be distributed without # modification as a part of a program using a makeup generated build # system, under the same distribution terms as the program itself. # The boilerplate macros here are divided into two preliminary # sets, MACRO_Function's and MACRO_Template's. Macro _Function's # are aliases for a set of commands in a rule. They are a make # friendly way to provide the equivalent of parametric shell # functions for common commands in makefile rules. # # Macro _Template's are parametric boilerplate for complete # makefile rules. They must be passed to GNU make's $(eval) # function to be parsed as rules for manipulating a target. # # Macro _Template_Function's are like MACRO_Functions except they # have an additional level of internal escaping for shell variables # on the expectation that they will be expanded twice before being # presented to the shell. # # Function macros typically should not rely on the user to provide # their final termination, since a) You may never really know what # that should be. b) If the macro expands to nothing, you don't # have to worry about what to do with a hanging ';' or similar. # # Shell commands that always expect at least one command can # terminate a single $(call SOME_Function,...):; as shown. EMPTY = WHITESPACE = $(EMPTY) $(EMPTY) # Debugging # ------------------------------------------------------------------ # $(VERBOSE_ANNOUNCE) # May be used as a command to enable simple tracing through makefile rules. # You should define MAKEUP_VERBOSE to be non-null to turn up the noise. define VERBOSE_ANNOUNCE @if [ -n "$(MAKEUP_VERBOSE)" ]; then echo "--> Updating target: $@"; fi; endef # Directory manipulation. 
# ------------------------------------------------------------------ # $(call CREATE_DIR_Function,_dirname) # Test for the existance of _dirname and create it (along with # any missing intermediate directories) if neccessary. define CREATE_DIR_Function if [ ! -e "$(1)" ]; then \ echo " Creating dir: $(1)"; \ mkdir -p "$(1)"; \ fi; endef # $(call CREATE_DIRS_Function,_dirnames) # Test for the existance of all directories in _dirnames and create them # (along with any missing intermediate directories) if neccessary. define CREATE_DIRS_Function for d in $(1); do \ $(call CREATE_DIR_Function,$$d) \ done; endef # File manipulation. # ------------------------------------------------------------------ # $(call SRCPATH_Function,_path) # Will strip the '$srcdir/' component from _path to return a file path # that is relative to the top source dir. SRCPATH_Function = $(subst $(top_srcdir)/,,$(1)) # $(call STAMP_Function,_stamp) # Will touch _stamp, creating it and any missing path components. define STAMP_Function if [ -n "$(1)" ]; then \ stamp_dir="$$(dirname $(1))"; \ $(call CREATE_DIR_Function,$$stamp_dir) \ touch "$(1)"; \ fi; endef # $(call REMOVE_STAMP_Function,_stamp) # Will remove _stamp and prune any empty path components that remain. define REMOVE_STAMP_Function if [ -n "$(1)" ]; then \ stamp_dir="$$(dirname $(1))"; \ $(RM) "$(1)"; \ if [ -e "$$stamp_dir" ]; then \ rmdir -p "$$stamp_dir" > /dev/null 2>&1 || true; \ fi; \ fi; endef # $(call INSTALL_DATA_Function,_files,_installdir,_srcdir,_label) # Helper function to install a list of _files, initially rooted at _srcdir # to _installdir. The _label is a purely informative tag to show to the # user during installation. define INSTALL_DATA_Function set -e; \ if [ -n "$(1)" ]; then \ for f in $(1); do \ dest="$(2)/$$(dirname $$f)"; \ destdir="$${dest%%/.}"; \ $(call CREATE_DIR_Function,$${destdir}) \ echo " Installing $(4) : $$f"; \ $(INSTALL_DATA) "$(3)/$$f" "$${destdir}"; \ done; \ fi; endef # $(call INSTALL_DATA_Template_Function,_files,_installdir,_srcdir,_label) # This is the equivalent of INSTALL_DATA_Function for use within templates. define INSTALL_DATA_Template_Function set -e; \ if [ -n "$(1)" ]; then \ for f in $(1); do \ dest="$(2)/$$$$(dirname $$$$f)"; \ destdir="$$$${dest%%/.}"; \ $(call CREATE_DIR_Function,$$$${destdir}) \ echo " Installing $(4) : $$$$f"; \ $(INSTALL_DATA) "$(3)/$$$$f" "$$$${destdir}"; \ done; \ fi; endef # $(call INSTALL_DATA_RECURSIVE_Function,_files,_installdir,_srcdir,_label) # Helper function to install a list of _files, initially rooted at _srcdir # to _installdir. The _label is a purely informative tag to show to the # user during installation. This is identical to INSTALL_DATA_Function, # except it may also include directories in the list of _files. They will # be copied to the installdir preserving any substructure, except for hidden # files which will be treated according to the globbing policy of the local # shell (which is usually to ignore them) unless explicitly named in _files. # # Note the subshell which keeps scope for f and i. define INSTALL_DATA_RECURSIVE_Function set -e; \ ( set -e; \ do_install() { \ if [ -z "$$1" ]; then return; fi; \ ( for f in $$1; do \ if [ -d "$$3/$$f" ]; then \ echo " $${4}Installing $(4) subdir : $$f"; \ for i in $$(cd "$$3"/"$$f" && ls -d * 2> /dev/null); do \ do_install "$$i" "$$2/$$f" "$$3/$$f" ". 
$$4"; \ done; \ else \ dest="$$2/$$(dirname $$f)"; \ destdir="$${dest%%/.}"; \ $(call CREATE_DIR_Function,$${destdir}) \ echo " $${4}Installing $(4) : $$f"; \ $(INSTALL_DATA) "$$3/$$f" "$${destdir}"; \ fi; \ done; \ ) \ }; \ do_install "$(1)" "$(2)" "$(3)"; \ ) endef # $(call MERGE_FILES_Function,_files,_srcdir,_slavedir,_stamp,_timeout) # Merge two file trees # # Parameters: # $1 - list of filenames to merge # $2 - file source (master) dir # $3 - file destination (slave) dir # $4 - slave stamp file # $5 - user input timeout. # # If the slave stamp file name is provided files will only be merged if the # master is both different from the slave and newer than the slave stamp file. # (So you can refuse a diff and not be notified again if something unrelated # changes). If the optional _timeout is supplied, prompts for user input will # wait up to that many seconds before defaulting to ignore the action prompted # for. If not supplied, such prompts will wait indefinitely. # # We need to always run this one under bash. We may use a timeout in read, # which isn't really replaceable. and so while we are stuck with it, we may # as well also keep using the pattern substitution, and test -nt, which dash # manages to break in the most amazingly braindead way you could never imagine. # # FIXME: Check for diff and use PAGER instead of hard coded less. define MERGE_FILES_Function if [ -z "$$BASH" ]; then \ echo "$@: Uh Oh, trouble in $(THIS_MAKEFILE)"; \ echo "$@: dash is too retarded to run MERGE_FILES_Function for $(1)"; \ echo "$@: you need to set SHELL = bash for this target"; \ echo; \ exit 1; \ fi; \ ( \ if [ -n "$(1)" ]; then \ if [ -d "$(2)" ] && [ -d "$(3)" ]; then \ if [ -n "$(5)" ]; then \ timeout="-t $(5)"; \ else \ timeout=''; \ fi; \ for f in $(1); do \ if [ -n "$(4)" ]; then \ merge_stamp="$(4)$${f/\//_}"; \ fi; \ if [ ! -e "$(3)/$$f" ]; then \ echo " ++ Adding: $(patsubst ./%,%,$(3))/$$f"; \ cp -a "$(2)/$$f" "$(3)/$$f"; \ elif [ "$(2)/$$f" -nt "$$merge_stamp" ]; then \ if ! diff "$(2)/$$f" "$(3)/$$f" > /dev/null 2>&1 ; then \ merge_done=0; \ diff_done=0; \ while [ $$merge_done -ne 1 ]; do \ echo -n "$$f has changed. Diff, Ignore or Replace ";\ if [ $$diff_done -ne 1 ]; then \ echo -n "[D/i/r] : "; \ else \ echo -n "[d/I/r] : "; \ fi; \ merge_cmd=''; \ if ! 
read $$timeout merge_cmd; then \ echo "No response - Ignored."; \ merge_cmd=Ignore; \ fi; \ if [ -z "$$merge_cmd" ]; then \ if [ $$diff_done -ne 1 ]; then \ merge_cmd=Diff; \ else \ merge_cmd=Ignore; \ fi; \ fi; \ case "$$merge_cmd" in \ D*|d*) \ VIMDIFF=/usr/bin/vimdiff; \ if [ -x $$VIMDIFF ]; then \ $$VIMDIFF "$(2)/$$f" "$(3)/$$f"; \ else \ diff -u "$(2)/$$f" "$(3)/$$f" | less; \ fi; \ diff_done=1; \ ;; \ I*|i*) \ $(call STAMP_Function,$$merge_stamp) \ merge_done=1; \ ;; \ R*|r*) \ cp -a "$(2)/$$f" "$(3)/$$f"; \ $(call REMOVE_STAMP_Function,$$merge_stamp) \ merge_done=1; \ ;; \ esac; \ done; \ else \ if [ -n "$(MAKEUP_VERBOSE)" ] ; then \ echo " $(2)/$$f has no new content to merge."; \ fi; \ $(call STAMP_Function,$$merge_stamp) \ fi; \ elif [ -n "$(MAKEUP_VERBOSE)" ] ; then \ echo " $(2)/$$f is suppressed by ignore stamp."; \ fi; \ done; \ else \ echo "*** attempt to merge non existent dirs"; \ echo " $(2)"; \ echo " $(3)"; \ false; \ exit; \ fi; \ fi; \ ); endef # Goals, and where to score them # ------------------------------------------------------------------ ifneq ($(findstring Y,$(MAKEUP_VERBOSE)),) MAKEUP_EXTRA_DEBUG = --debug=v,i,m endif # $(call NEXT_GOAL_Function, _file, _target) # This function will try to create _file if it does not exist # and then attempt to make the goal _target from it. # # Note that we strip any leading ./ from a makefile name before # invoking it, since in gmake 3.80 nothing other than a pattern # starting with % will match it when it tries to update itself. # If invoked via: make -f ./foo/bar, attempts to update the # makefile itself will match "%/foo/bar :", or "%/bar :", but not # "./foo/bar :", nor "./foo/% :", nor even "$(THIS_MAKEFILE) :". define NEXT_GOAL_Function if [ -n '$(MAKEUP_VERBOSE)' ]; then \ echo "[$(MAKELEVEL)] requires $(2) from $(1)"; \ fi; \ if [ ! -e $(1) ]; then \ $(MAKE) $(MAKEUP_EXTRA_DEBUG) --no-print-directory \ -f $(SYS_MAKEUP_GMAKE_DIR)/makefile.makeup $(1); \ fi; \ $(MAKE) $(MAKEUP_EXTRA_DEBUG) --no-print-directory -f $(patsubst ./%,%,$(1)) $(2); endef # $(eval $(GET_KBUILD_TARGETS_Template)) # Scans the config for KBUILD targets, building a list of target names in # KBUILD_TARGETS, an array of KBUILD_TARGET_DIR_$(target) paths, and a list # if the target config files in KBUILD_TARGET_FILES. It will warn (and not # add) a target if its $(target)_DIR parameter is not defined. 
# # We do a fair bit of work here, so if this proves to be slow with long lists # of target files, then it shouldn't be too hard to cache this data in a file # and only update it when the target files change, but for now keep it 'simple' define GET_KBUILD_TARGETS_Template $(if $(KBUILD_TARGET_FILES), $(warning WARNING: KBUILD_TARGET_FILES already set) \ $(eval KBUILD_TARGET_FILES =)) KBUILD_TARGETS = $(strip \ $(foreach f,$(wildcard $(MAKEUP_CONFIG_DIR)/target.*), \ $(foreach t,$(patsubst $(MAKEUP_CONFIG_DIR)/target.%,%,$(f)),\ $(if $(shell grep -qE "^[[:space:]]*$(t)_TYPE[[:space:]]*[: ]=[[:space:]]*KBUILD" $(f)\ && echo "got-one"), \ $(eval d = $(shell awk -F"([ \t]*=[ \t]*|[ \t]+$$)" '$$1 == p {print $$2}' p=$(t)_DIR $(f)))\ $(if $(d), $(eval KBUILD_TARGET_DIR_$(t) = $(d)) \ $(eval KBUILD_TARGET_FILES += $(f)) $(t), \ $(warning WARNING: $(f): KBUILD target without $(t)_DIR)))))) endef # Standard rule templates for mapping system makeup to a local project # -------------------------------------------------------------------- # $(eval $(IMPORT_MAKEUP_RULE_Template)) # This template creates an implicit rule to update makeup boilerplate # from a system installation. define IMPORT_MAKEUP_RULE_Template $(MAKEUP_DIR)/% : $(SYS_MAKEUP_DIR)/% $$(VERBOSE_ANNOUNCE) @( set -e; \ if [ -e "$$<" ] ; then \ $$(call CREATE_DIR_Function,$$(@D)) \ $$(call MERGE_FILES_Function,$$*,$(SYS_MAKEUP_DIR),$(MAKEUP_DIR),$(MAKEUP_STAMP_DIR)/ignored-update--) \ elif [ "$$( $(3).new; \ then \ mv $(3).new $(3); \ else \ rm -f $(3).new; \ fi; endef # $(call APPEND_PARAM_VAL_Function,_param,_value,_file) # Similar to SET_PARAM_Function, but it will append _value to _param # if it already exists in _file instead of stomping its current value. define APPEND_PARAM_VAL_Function if awk 'BEGIN { FS = "([ \t]*=[ \t]*|[ \t]+$$)"; OFS = " = "; \ replaced = "no" }; \ $$1 == PARAM { $$0 = $$0 " " VALUE; replaced = "yes"; }; \ { print $$0 }; \ END { if ( replaced == "no" ) print PARAM " = " VALUE; }' \ PARAM="$(1)" VALUE="$(2)" $(3) > $(3).new; \ then \ mv $(3).new $(3); \ else \ rm -f $(3).new; \ fi; endef # $(call REMOVE_PARAM_VAL_Function,_param,_value,_file) # Remove any portion matching _value from the assignment to _param # in _file. define REMOVE_PARAM_VAL_Function if awk 'BEGIN { FS = "([ \t]*=[ \t]*|[ \t]+$$)"; OFS = " = " }; \ $$1 == PARAM { for( i = 2; i <= NF; i++ ) \ { sub(VALUE, "", $$i); \ gsub(/ +/, " ", $$i); \ }; \ }; \ { print $$0 };' \ PARAM="$(1)" VALUE="$(2)" $(3) > $(3).new; \ then \ mv $(3).new $(3); \ else \ rm -f $(3).new; \ fi; endef # $(call COMMENT_DEAD_PARAMS_Function,_params,_file) # Will comment out any parameter that is NOT in the _params list. # Mainly used when changing some option which is likely to invalidate # some or all preexisting parameters. We could just nuke them, but this # is probably better than losing them due to a keyboard or brain fumble. # We also could just leave them there and mostly ignore them, which is what # we used to do, but that's suboptimal too and could come back to bite us. 
define COMMENT_DEAD_PARAMS_Function if awk 'BEGIN { FS = "([ \t]*=[ \t]*|[ \t]+$$)"; OFS = " = "; } \ $$1 !~ /(^[ \t]*#.*)$(subst $(WHITESPACE),,$(foreach p,$(1),|(^$(p))))/ \ { $$0 = "# " $$1 " = " $$2; }; \ { print $$0 }' \ $(2) > $(2).new; \ then \ mv $(2).new $(2); \ else \ rm -f $(2).new; \ fi; endef # Makeup interactive help support # ------------------------------------------------------------------ # $(eval $(MAKEUP_HELP_Template)) # Create targets to support 'make help' and 'make help-foo' for # inline makeup documentation. define MAKEUP_HELP_Template help : @( echo; \ echo " Help is available for the following topics:"; \ echo; \ $$(foreach t,$$(MAKEUP_HELP),printf " %-10s - %s\n" "$$(t)" "$$(MAKEUP_HELP_BRIEF_$$(t))";) \ echo; \ echo "Use 'make help-' for further information."; \ echo; \ ) help-% : force-update $$(VERBOSE_ANNOUNCE) @$$(if $$(MAKEUP_HELP_$$*), \ echo; \ echo " --- Makeup $$* ---"; \ echo; \ $$(MAKEUP_HELP_$$*); \ echo;, \ echo; \ echo " No help for: '$$*'"; \ echo " Use 'make help' for a list of topics."; \ echo; \ ) delegatedhelp-% : force-update $$(VERBOSE_ANNOUNCE) @$$(if $$(MAKEUP_HELP_$$*), \ $$(MAKEUP_HELP_$$*); \ echo;, \ echo " No help for: '$$*' in $(THIS_MAKEFILE)"; \ echo " Please report this to Ron ."; \ echo " Use 'make help' for a list of topics."; \ echo; \ ) endef bit-babbler-0.9/Makeup/gmake-fragments/makefile.i18n0000644000000000000000000000761414125243667017207 0ustar # makeup internationalisation stub, includes rules for building # gettext style .mo and .po files. # # Copyright 1999 - 2021, Ron # # This file is distributed under the terms of the GNU GPL v2. override THIS_MAKEFILE := $(lastword $(MAKEFILE_LIST)) ifneq ($(strip $(MAKEUP_VERBOSE)),) $(warning goal[$(MAKELEVEL)] is $(MAKECMDGOALS)...) endif ifndef top_srcdir include Makefile.acsubst endif # Usually defined in makefile.acsubst. But may be overridden. ifndef MAKEUP_TOP_CONFIG MAKEUP_TOP_CONFIG = $(top_srcdir)/Makeup/Makeup.conf endif include $(MAKEUP_TOP_CONFIG) ifneq ($(strip $(PACKAGE_CONF)),) -include $(PACKAGE_CONF) endif include $(MAKEUP_GMAKE_DIR)/makefile.fstools localedir = $(srcdir)/locale allmo : $(VERBOSE_ANNOUNCE) @+( set -e; \ for d in $(ALL_LINGUAS) ; do \ echo ; \ echo "--> Examining locale '$$d'" ; \ domains="$(basename $(notdir $(wildcard $(localedir)/*.pot)))"; \ for n in $${domains:=$(PACKAGE_NAME)} ; do \ $(call NEXT_GOAL_Function,$(THIS_MAKEFILE), \ locale/$$d/LC_MESSAGES/$$n.mo) \ done; \ done; \ ) allpo : $(VERBOSE_ANNOUNCE) @+( set -e; \ for d in $(ALL_LINGUAS) ; do \ echo ; \ echo "--> Examining $(localedir)/$$d/LC_MESSAGES/$(PACKAGE_NAME).po"; \ $(call NEXT_GOAL_Function,$(THIS_MAKEFILE), \ $(localedir)/$$d/LC_MESSAGES/$(PACKAGE_NAME).po) \ done; \ ) $(localedir)/$(PACKAGE_NAME).pot : $(shell find $(GETTEXT_MSG_SRC) -name "*.h" -o \ -name "*.cc" -o \ -name "*.cpp" ) $(VERBOSE_ANNOUNCE) @if [ -z "$(XGETTEXT)" ]; then \ echo "*** xgettext is not installed/enabled. aborting"; \ echo ; \ exit 1; \ fi @( set -e; \ echo " * Generating $(PACKAGE_NAME).pot message catalog template"; \ $(call CREATE_DIR_Function,$(localedir)) \ $(XGETTEXT) --copyright-holder="$(PACKAGE_MAINTAINER)" \ --msgid-bugs-address="$(PACKAGE_MAINTAINER)" \ $(XGETTEXT_ARGS) $^ -o $@ ; \ ) define POFILE_Template $(localedir)/$(1)/LC_MESSAGES/%.po : $(localedir)/%.pot $$(VERBOSE_ANNOUNCE) @( set -e ; \ if ! test -f $$< ; then \ echo "*** $$< is not a file. aborting" ; \ echo ; \ exit 1 ; \ fi; \ if [ -z "$(MSGMERGE)" ]; then \ echo "*** msgmerge is not installed/enabled. 
aborting"; \ echo ; \ exit 1; \ fi; \ if [ -z "$(MSGINIT)" ]; then \ echo "*** msginit is not installed/enabled. aborting"; \ echo ; \ exit 1; \ fi; \ ) @( set -e ; \ if test -f $$@ ; then \ echo ; \ echo -n " Merging changes from $$< message catalog template..." ;\ $(MSGMERGE) $$@ $$< > $$@.new && mv $$@.new $$@ ; \ else \ echo ; \ $(call CREATE_DIR_Function,$$(@D)) \ echo -n " New locale: " ; \ $(MSGINIT) --no-translator -i $$< -o $$@ -l $(1) ; \ fi; \ ) endef define MOFILE_Template locale/$(1)/LC_MESSAGES/%.mo : $(localedir)/$(1)/LC_MESSAGES/%.po $$(VERBOSE_ANNOUNCE) @( set -e; \ echo ; \ if [ -z "$(MSGFMT)" ]; then \ echo "*** gettext is not enabled or msgfmt is not installed. aborting";\ echo ; \ exit 1; \ fi; \ ) @( set -e; \ echo " $$@ <--- $$<" ; \ if test -e $$< ; then \ $(call CREATE_DIR_Function,$$(@D)) \ echo -n " " ; \ $(MSGFMT) --verbose -c -o $$@ $$< ; \ fi; \ if test -f $$@ ; then \ echo " Ok." ; \ echo ; \ else \ echo "*** Failed ***" ; \ echo ; \ exit 1; \ fi; \ ) endef $(eval $(foreach lingua,$(ALL_LINGUAS), \ $(call POFILE_Template,$(lingua)) \ $(call MOFILE_Template,$(lingua)))) .SECONDARY : .PHONY : allmo allpo bit-babbler-0.9/Makeup/gmake-fragments/makefile.makeup0000644000000000000000000013710514125243667017711 0ustar # makeup core targets. # # Copyright 2003 - 2021, Ron # # This file is distributed under the terms of the GNU GPL version 2. # # As a special exception to the GPL, it may be distributed without # modification as a part of a program using a makeup generated build # system, under the same distribution terms as the program itself. override THIS_MAKEFILE := $(lastword $(MAKEFILE_LIST)) MAKEUP_DIR = $(top_srcdir)/Makeup PACKAGE_CONFIG_HEADER = setup.h # More ways to dodge the "eval breaks in a conditional" bug of GNU make 3.80 ifneq ($(strip $(MAKEUP_VERBOSE)),) $(warning goal[$(MAKELEVEL)] is '$(MAKECMDGOALS)' ...) MORE_MAKEUP := include else MORE_MAKEUP := -include endif ifndef top_srcdir include Makefile.acsubst endif # Usually defined in makefile.acsubst. But may be overridden. ifndef MAKEUP_TOP_CONFIG MAKEUP_TOP_CONFIG = $(top_srcdir)/Makeup/Makeup.conf endif $(foreach conf,$(if $(MAKEUP_VERSION),,$(MAKEUP_TOP_CONFIG)),$(eval $(MORE_MAKEUP) $(conf))) $(foreach path,$(MAKEUP_GMAKE_DIR),$(eval $(MORE_MAKEUP) $(path)/makefile.fstools)) ifneq ($(findstring sys-install,$(MAKECMDGOALS)),) # sys-install does not need any of the PACKAGE_ attributes # so ignore them early. PACKAGE_CONF = endif $(if $(findstring makefile.fstools,$(MAKEFILE_LIST)), \ $(foreach conf,$(PACKAGE_CONF),$(eval $(MORE_MAKEUP) $(conf)))) ifneq ($(strip $(MAKEUP_VERBOSE)),) $(foreach f,$(MAKEFILE_LIST),$(warning goal[$(MAKELEVEL)] using: $(f))) endif # $(eval $(call JUST_INCLUDE,_file)) # # Include a makefile snippet, without trying to update that file via the default # catchall rules. There must not be any other rule to create or update that file. define JUST_INCLUDE include $(1) $(1) : ; endef # $(call DELEGATE_TARGET,makefile[,target][,alternate_action]) # # If makefile cannot be found the alternate_action will be executed. # As a special case, if alternate_action is 'Fatal', then the build # will fail if makefile is not found. define DELEGATE_TARGET if [ "$(origin NEXT_GOAL_Function)" != "file" ] ; then \ echo "*** NEXT_GOAL_Function is not defined"; \ exit 1; \ fi; \ if ! ( false; $(call NEXT_GOAL_Function,$(MAKEUP_GMAKE_DIR)/$(1),$(2)) ); then \ if [ "$(3)" = "Fatal" ]; then \ echo; \ echo "Failed to make target: $(2)"; \ if [ ! 
-e "$(MAKEUP_GMAKE_DIR)/$(1)" ]; then \ echo; \ echo "You do not have $(1) installed."; \ echo; \ echo "Restore your source from the package tarball, contact"; \ echo "the package maintainer of this source about the problem,"; \ echo "or contact Ron for your own copy of the"; \ echo "ever so time saving Makeup utilities."; \ fi; \ echo; \ exit 1; \ else \ $(3) :; \ fi; \ fi; endef # $(call DELEGATE_HELP,makefile[,target][,alternate_action]) # # If makefile cannot be found the alternate_action will be executed. # If alternate_action is empty, then the build will fail with a default # message in that case. define DELEGATE_HELP if [ "$(origin NEXT_GOAL_Function)" != "file" ] ; then \ echo "*** NEXT_GOAL_Function is not defined"; \ exit 1; \ fi; \ if ! ( false; $(call NEXT_GOAL_Function,$(MAKEUP_GMAKE_DIR)/$(1),delegated$(2)) ); then \ if [ -z "$(3)" ]; then \ echo; \ echo "Unable to locate documentation for: $(2)"; \ if [ ! -e "$(1)" ]; then \ echo; \ echo "You do not have $(1) installed."; \ echo; \ echo "Restore your source from the package tarball, contact"; \ echo "the package maintainer of this source about the problem,"; \ echo "or contact Ron for your own copy of the"; \ echo "ever so time saving Makeup utilities."; \ fi; \ echo; \ exit 1; \ else \ $(3) :; \ fi; \ fi; endef # $(call MAKEUP_FOR_EACH,_targets[,_rule]) # This function will (re)call the current makefile for each of the # (space-separated) _targets, optionally prepending them with _rule # to execute any of the standard prefix rules (clean-, install-, etc.) # The default action (empty _rule) is to build all _targets. define MAKEUP_FOR_EACH for t in $(1); do \ set -e; \ echo; \ echo " ----- $(2) Target: $$t"; \ if [ -n "$(2)" ]; then \ t="$(2)-$$t"; \ fi; \ $(call NEXT_GOAL_Function,$(THIS_MAKEFILE),$$t) \ done; endef # $(call MAKEUP_FOR_TARGET,_target[,_rule]) # This function will attempt to locate a configuration file for _target # and perform _rule on it. The default action (empty _rule) is to # build _target. If no configuration file exists for it, then no action # will be taken to modify _target. define MAKEUP_FOR_TARGET if [ -r "$(MAKEUP_CONFIG_DIR)/target.$(1)" ]; then \ $(call DELEGATE_TARGET,makefile.target,TARGET_NAME=$(1) $(2),Fatal):;\ elif [ -e "$(1)" ]; then \ if [ -n "$(MAKEUP_VERBOSE)" ]; then \ echo " xxx - ignoring feral target($(2)) $(1)"; \ fi; \ else \ if [ -z "$(2)" ]; then \ echo " *+* No rule to make target $(1)"; \ else \ echo " *+* No rule to $(2) target $(1)"; \ fi; \ echo; \ fi; endef # $(call CREATE_CONF_Function,_file) # This function will emit a copy of the current default Makeup.conf to _file. define CREATE_CONF_Function echo " * Generating $(1)"; \ $(call CREATE_DIR_Function,$(patsubst %/,%,$(dir $(1)))) \ echo '# makeup configuration data.' 
> $(1); \ echo '#' >> $(1); \ echo '# Copyright 2003 - 2021, Ron ' >> $(1); \ echo >> $(1); \ echo 'MAKEUP_VERSION = 0.38' >> $(1); \ echo '#MAKEUP_VERBOSE = yes' >> $(1); \ echo >> $(1); \ echo 'MAKEUP_DIR = $$(top_srcdir)/Makeup' >> $(1); \ echo >> $(1); \ echo 'MAKEUP_CONFIG_DIR = $$(MAKEUP_DIR)/config' >> $(1); \ echo 'MAKEUP_CONF_M4_DIR = $$(MAKEUP_CONFIG_DIR)/m4' >> $(1); \ echo 'MAKEUP_AC_DIR = $$(MAKEUP_DIR)/ac-fragments' >> $(1); \ echo 'MAKEUP_AC_AUX_DIR = $$(MAKEUP_DIR)/ac-aux' >> $(1); \ echo 'MAKEUP_GMAKE_DIR = $$(MAKEUP_DIR)/gmake-fragments' >> $(1); \ echo 'MAKEUP_SWIG_DIR = $$(MAKEUP_DIR)/swig' >> $(1); \ echo 'MAKEUP_SWIG_IF_DIR = $$(MAKEUP_SWIG_DIR)/interfaces' >> $(1); \ echo 'MAKEUP_SWIG_WRAP_DIR = $$(MAKEUP_SWIG_DIR)/wrappers' >> $(1); \ echo 'MAKEUP_DOXYGEN_DIR = $$(MAKEUP_DIR)/doxygen' >> $(1); \ echo 'MAKEUP_DOXYGEN_THEME_DIR = $$(MAKEUP_DOXYGEN_DIR)/themes' >> $(1); \ echo 'MAKEUP_STAMP_DIR = $$(MAKEUP_DIR)/stamp' >> $(1); \ echo 'MAKEUP_TEST_DIR = $$(MAKEUP_DIR)/test' >> $(1); \ echo >> $(1); \ echo >> $(1); \ echo 'SYS_MAKEUP_DIR = /usr/share/makeup' >> $(1); \ echo >> $(1); \ echo 'SYS_MAKEUP_AC_DIR = $$(SYS_MAKEUP_DIR)/ac-fragments' >> $(1);\ echo 'SYS_MAKEUP_GMAKE_DIR = $$(SYS_MAKEUP_DIR)/gmake-fragments' >> $(1);\ echo 'SYS_MAKEUP_DOXYGEN_DIR = $$(SYS_MAKEUP_DIR)/doxygen' >> $(1); \ echo 'SYS_MAKEUP_DOXYGEN_THEME_DIR = $$(SYS_MAKEUP_DOXYGEN_DIR)/themes' >> $(1); \ echo >> $(1); \ echo >> $(1); \ echo 'MAKEUP_TARGET_TYPES = EXECUTABLE LIBRARY PLUGIN DATA SWIG KBUILD' >> $(1);\ echo 'MAKEUP_LINK_LANGUAGES = C C++' >> $(1); \ echo >> $(1); \ echo 'PROJECT_DIRS = doc include src' >> $(1); \ echo >> $(1); \ echo 'PACKAGE_CONF = $$(MAKEUP_CONFIG_DIR)/Package.conf' >> $(1); \ echo >> $(1); endef # $(call BOOTSTRAP_MERGE_Function,_file,_srcdir,_slavedir) # define BOOTSTRAP_MERGE_Function set -e; \ if [ -e "$(2)/$(1)" ]; then \ if [ -e "$(3)/$(1)" ]; then \ $(call MERGE_FILES_Function,$(1),$(2),$(3)) :; \ else \ $(call CREATE_DIR_Function,$(3)) \ echo " ++ Adding: $(3)/$(1)"; \ cp "$(2)/$(1)" "$(3)/$(1)"; \ fi; \ fi; endef # $(call BOOTSTRAP_MERGE_RECURSIVE_Function,_file,_srcdir,_slavedir) # As above, but also check, rather than barf on, all subdirectories. # Note the subshell which keeps scope for i. # define BOOTSTRAP_MERGE_RECURSIVE_Function set -e; \ ( set -e; \ do_merge() { \ if [ -z "$$1" ]; then return; fi; \ if [ -e "$$2/$$1" ]; then \ if [ -d "$$3/$$1" ]; then \ if [ -d "$$2/$$1" ]; then \ ( for i in $$(cd "$$2"/"$$1" && ls -d * 2> /dev/null); do \ do_merge "$$i" "$$2/$$1" "$$3/$$1"; \ done; \ ) \ else \ echo " *** error replacing a directory with a file not supported yet"; \ return; \ fi; \ elif [ -e "$$3/$$1" ]; then \ $(call MERGE_FILES_Function,$$1,$$2,$$3) :; \ else \ $(call CREATE_DIR_Function,$$3) \ echo " ++ Adding: $$3/$$1"; \ cp -a "$$2/$$1" "$$3/$$1"; \ fi; \ fi; \ }; \ do_merge "$(1)" "$(2)" "$(3)"; \ ) endef # Usually we get these from makefile.fstools, but we have a couple of # things to do before we can be sure that they are available to us here. ifndef VERBOSE_ANNOUNCE define VERBOSE_ANNOUNCE @if [ -n "$(MAKEUP_VERBOSE)" ]; then echo "--> Updating target: $@"; fi; endef endif ifndef CREATE_DIR_Function define CREATE_DIR_Function if [ ! -e $(1) ]; then \ echo " Creating dir: $(1)"; \ mkdir -p $(1); \ fi; endef endif # $(call REMOVE_DIRS_Function,_dirs,_message) # This function will unconditionally wipe all _dirs and their contents. 
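# A minimal usage sketch (the directory names here are hypothetical):
#
#   @$(call REMOVE_DIRS_Function,build-tmp $(wildcard coverage-*),"temporary output dirs")
#
# which announces and deletes each listed directory that exists, and with
# MAKEUP_VERBOSE set also reports the ones that were already absent.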
define REMOVE_DIRS_Function $(if $(1),echo " Removing: $(if $(2),$(2),directories)"; \ $(foreach d,$(1), \ if [ -e "$(d)" ]; then \ echo " $(patsubst ./%,%,$(d))"; \ $(RM) -r $(d); \ elif [ -n "$(MAKEUP_VERBOSE)" ]; then \ echo " [$(patsubst ./%,%,$(d))]"; \ fi; \ ) \ ) endef # $(call PRUNE_DIRS_Function,_dirs) # This function will remove empty _dirs and any empty components of # their path up until the first otherwise non empty directory. If # the specified directory is not empty the error if any is left for # the caller to deal with appropriately. define PRUNE_DIRS_Function $(foreach d,$(1), \ $(if $(wildcard $(d)), \ echo " Pruning path to: $(d)"; rmdir -p $(d); )) endef # $(call REMOVE_FILES_Function,_files,_message) # This function will unconditionally delete all _files specified. define REMOVE_FILES_Function $(if $(1),echo " Removing: $(if $(2),$(2),files)"; \ $(foreach glob,$(1), \ for f in $(glob); do \ if [ -e "$$f" ] || [ -h "$$f" ]; then \ echo " $${f#./}"; \ $(RM) $$f; \ elif [ -n "$(MAKEUP_VERBOSE)" ]; then \ echo " [$${f#./}]"; \ fi; \ done; \ ) \ ) endef # Default target all : # Makeup self maintenance # ------------------------------------------------------------------ # The contents of Makeup.conf infects just about everything here, so # unless we are in the act of cleaning up, make sure it exists and is # up to date before whatever comes next. If a system config is found # that is newer than the existing config, prompt the user for a merge. # If there is no existing config, copy the system one. If there is no # system config either, generate one from this file. # # If the goal is sys-install and the config file does not already exist, # a flag stamp 'makeup-sys-install-created-conf' will be created so that # sys-install may remove the temporary config that is created. ifeq ($(findstring makefile.fstools,$(MAKEFILE_LIST)),) $(MAKEUP_TOP_CONFIG) : SHELL = bash $(MAKEUP_TOP_CONFIG) : $(if $(SYS_MAKEUP_DIR),$(SYS_MAKEUP_DIR)/Makeup.conf) $(VERBOSE_ANNOUNCE) ifeq ($(findstring clean,$(MAKECMDGOALS)),) ifneq ($(findstring sys-install,$(MAKECMDGOALS)),) @if [ ! -e $@ ]; then touch makeup-sys-install-created-conf; fi endif @$(call BOOTSTRAP_MERGE_Function,$(@F),$( Not updating $@ during $(MAKECMDGOALS)" @echo endif endif ifeq ($(strip $(SYS_MAKEUP_GMAKE_DIR)),) # Don't try to update this file if we don't know where to source a replacement $(THIS_MAKEFILE) : ; else ifeq ($(findstring makefile.fstools,$(MAKEFILE_LIST)),) # This rule will get us a copy of fstools if we don't have one yet, # it contains a more powerful rule that will supercede this one to # keep it updated. $(MAKEUP_GMAKE_DIR)/makefile.fstools : SHELL = bash $(MAKEUP_GMAKE_DIR)/makefile.fstools : $(SYS_MAKEUP_GMAKE_DIR)/makefile.fstools $(VERBOSE_ANNOUNCE) ifeq ($(findstring clean,$(MAKECMDGOALS)),) @$(call BOOTSTRAP_MERGE_Function,$(@F),$( Not updating $@ during $(MAKECMDGOALS)" @echo endif $(wildcard $(addprefix $(SYS_MAKEUP_AC_DIR)/,configure.* acsubst.* *.m4) \ $(SYS_MAKEUP_GMAKE_DIR)/makefile.*) \ $(SYS_MAKEUP_DIR)/Makeup.conf : $(SYS_MAKEUP_DIR)/% : $(VERBOSE_ANNOUNCE) ifeq ($(findstring sys-install,$(MAKECMDGOALS)),) @echo "*** Error attempting to create non existent '$@'" endif endif endif $(eval $(UPDATE_MAKEUP_Template)) # If there are no shared files on the system, and the local source has been # stripped for release, then we should just try to build it all as is. # Users of a package should not be burdened with extra dependencies for # developer functions that they do not need. 
ifneq ($(wildcard $(SYS_MAKEUP_GMAKE_DIR)),) CHECK_MAKEUP = yes else ifneq ($(wildcard $(MAKEUP_GMAKE_DIR)/makefile.utils),) ifneq ($(wildcard $(MAKEUP_AC_DIR)/makeup.m4),) CHECK_MAKEUP = yes endif endif # Package configuration # ------------------------------------------------------------------ $(eval $(GET_KBUILD_TARGETS_Template)) $(PACKAGE_CONF) : SHELL = bash $(PACKAGE_CONF) : $(VERBOSE_ANNOUNCE) ifeq ($(findstring clean,$(MAKECMDGOALS)),) ifneq ($(findstring makefile.fstools,$(MAKEFILE_LIST)),) @+( set -e; \ $(if $(wildcard $(MAKEUP_GMAKE_DIR)/makefile.utils),, \ $(call BOOTSTRAP_MERGE_Function,makefile.utils,$(SYS_MAKEUP_GMAKE_DIR),$(MAKEUP_GMAKE_DIR)))\ $(call DELEGATE_TARGET,makefile.utils,black-magic,Fatal) \ ) else @echo "*** Error: trying to create $@ before makefile.fstools"; \ exit 1; endif else @echo " <> Not creating $@ during $(MAKECMDGOALS)"; echo; endif # Note: do not add help for sys-install here, it is a magic target # in makefile.utils and make help-sys-install will not do what you # might hope it would. Document that one under install instead. # MAKEUP_HELP += config targets coverage MAKEUP_HELP_BRIEF_config := Interactively edit the project configuration. MAKEUP_HELP_BRIEF_targets := Interactively edit the project targets. MAKEUP_HELP_BRIEF_coverage := Report on the code coverage of tests. MAKEUP_HELP_config = $(call DELEGATE_HELP,makefile.utils,$@) : MAKEUP_HELP_targets = $(call DELEGATE_HELP,makefile.utils,$@) : MAKEUP_HELP_coverage = $(call DELEGATE_HELP,makefile.utils,$@) : config \ targets \ sys-install : @+$(call DELEGATE_TARGET,makefile.utils,$@,Fatal) # Build dir autotools targets. # ------------------------------------------------------------------ __PACKAGE_CONFIG_DIR := $(subst ./,,$(dir $(PACKAGE_CONFIG_HEADER))) __PACKAGE_CONFIG_PUBLIC := $(notdir $(PACKAGE_CONFIG_HEADER)) __PACKAGE_CONFIG_PRIVATE := private_$(__PACKAGE_CONFIG_PUBLIC) __PACKAGE_CONFIG_PRIV_AH := $(__PACKAGE_CONFIG_PRIVATE).in ifeq ($(CHECK_MAKEUP),yes) __MAKEUP_BUILD_CHECK := $(MAKEUP_STAMP_DIR)/autoscan-report-stamp \ $(top_builddir)/include/$(__PACKAGE_CONFIG_PRIVATE) \ $(top_builddir)/include/$(PACKAGE_CONFIG_HEADER) endif Makefile : | $(__MAKEUP_BUILD_CHECK) Makefile : | $(foreach f,$(subst $(MAKEUP_CONFIG_DIR)/acfile.,, \ $(wildcard $(MAKEUP_CONFIG_DIR)/acfile.*)),$(f)) Makefile.acsubst : $(MAKEUP_GMAKE_DIR)/makefile.acsubst Makefile.acsubst Makefile : $(top_builddir)/config.status @echo -n " * Updating $@... " @cd $(top_builddir) && ./config.status $@ # Don't include this rule during 'unconfigured' maintenance operations. ifdef MAKEUP_PLATFORM_HEADER $(top_builddir)/include/$(PACKAGE_CONFIG_HEADER) \ $(top_builddir)/include/$(__PACKAGE_CONFIG_DIR)$(MAKEUP_PLATFORM_HEADER) \ $(top_builddir)/include/$(__PACKAGE_CONFIG_DIR)$(MAKEUP_FLAVOUR_HEADER) : \ $(top_builddir)/$(__PACKAGE_CONFIG_PUBLIC)-stamp @if [ ! -e $@ ]; then \ echo -n " * Restoring missing $(__PACKAGE_CONFIG_DIR)$(@F)... "; \ cd $(top_builddir) \ && ./config.status include/$(PACKAGE_CONFIG_HEADER); \ fi endif $(top_builddir)/$(__PACKAGE_CONFIG_PUBLIC)-stamp : $(top_builddir)/config.status @echo -n " * Updating $(PACKAGE_CONFIG_HEADER)... " @cd $(top_builddir) \ && ./config.status include/$(PACKAGE_CONFIG_HEADER) \ && touch $@ $(top_builddir)/include/$(__PACKAGE_CONFIG_PRIVATE) : $(top_builddir)/$(__PACKAGE_CONFIG_PRIVATE)-stamp @if [ ! -e $@ ]; then \ echo -n " * Restoring missing $(__PACKAGE_CONFIG_PRIVATE)... 
"; \ cd $(top_builddir) \ && ./config.status include/$(__PACKAGE_CONFIG_PRIVATE); \ fi $(top_builddir)/$(__PACKAGE_CONFIG_PRIVATE)-stamp : $(top_srcdir)/$(__PACKAGE_CONFIG_PRIV_AH) \ $(top_builddir)/config.status @echo -n " * Updating $(__PACKAGE_CONFIG_PRIVATE)... " @cd $(top_builddir) \ && ./config.status include/$(__PACKAGE_CONFIG_PRIVATE) \ && touch $@ $(foreach f,$(subst $(MAKEUP_CONFIG_DIR)/acfile.,, \ $(wildcard $(MAKEUP_CONFIG_DIR)/acfile.*)),$(f)) : \ % : $(MAKEUP_CONFIG_DIR)/acfile.% $(top_builddir)/config.status @echo -n " * Updating $@... " @cd $(top_builddir) && ./config.status $@ $(top_builddir)/config.status : $(top_srcdir)/configure @echo " * Checking config.status" @cd $(top_builddir) && ./config.status --recheck # Source dir autotools targets. # ------------------------------------------------------------------ # Don't try to use a default target to update package config dependencies. # If any do need updating in that way they will have an explicit rule. $(MAKEUP_CONFIG_DIR)/% : ; ifneq ($(CHECK_MAKEUP),yes) $(top_srcdir)/configure : ; $(MAKEUP_GMAKE_DIR)/% : ; $(MAKEUP_AC_DIR)/% : ; else $(top_srcdir)/$(__PACKAGE_CONFIG_PRIV_AH) : $(MAKEUP_STAMP_DIR)/$(__PACKAGE_CONFIG_PRIV_AH)-stamp @if [ ! -e $@ ]; then \ echo " * Restoring missing $(__PACKAGE_CONFIG_PRIV_AH)"; \ cd $(top_srcdir) && autoheader; \ fi; $(MAKEUP_STAMP_DIR)/$(__PACKAGE_CONFIG_PRIV_AH)-stamp : $(top_srcdir)/configure.ac $(VERBOSE_ANNOUNCE) @echo " * Checking $(__PACKAGE_CONFIG_PRIV_AH)" @( cd $(top_srcdir) && autoheader ) && $(call STAMP_Function,$@) :; @if [ -e $(top_srcdir)/$(__PACKAGE_CONFIG_PRIV_AH)~ ] && \ [ -z "$$(cat $(top_srcdir)/$(__PACKAGE_CONFIG_PRIV_AH)~)" ]; then \ $(call REMOVE_FILES_Function,$(top_srcdir)/$(__PACKAGE_CONFIG_PRIV_AH)~ \ ,"empty $(__PACKAGE_CONFIG_PRIV_AH)~") :; \ fi; $(top_srcdir)/configure : $(addprefix $(top_srcdir)/,configure.ac aclocal.m4) \ | $(top_srcdir)/$(__PACKAGE_CONFIG_PRIV_AH) $(VERBOSE_ANNOUNCE) @echo " * Generating configure" @cd $(top_srcdir) && autoconf $(top_srcdir)/configure.ac : $(MAKEUP_TOP_CONFIG) \ $(PACKAGE_CONF) \ $(MAKEUP_GMAKE_DIR)/makefile.utils \ $(addprefix $(MAKEUP_AC_DIR)/, makeup.m4 \ $(foreach t,$(PACKAGE_TESTS), \ $(if $(wildcard $(MAKEUP_CONFIG_DIR)/$(t)),,$(t))) \ $(foreach t,$(subst configure,acsubst,$(PACKAGE_TESTS)),\ $(if $(wildcard $(SYS_MAKEUP_AC_DIR)/$(t)), \ $(if $(wildcard $(MAKEUP_CONFIG_DIR)/$(t)),,$(t)))))\ $(wildcard $(MAKEUP_CONFIG_DIR)/configure.*) \ $(wildcard $(MAKEUP_CONFIG_DIR)/acsubst.*) \ $(wildcard $(MAKEUP_CONFIG_DIR)/acfile.*) \ $(KBUILD_TARGET_FILES) \ | $(addprefix $(MAKEUP_GMAKE_DIR)/, \ makefile.makeup makefile.acsubst \ $(if $(KBUILD_TARGET_FILES), \ makefile.kbuild)) \ $(foreach d,$(PACKAGE_DOCS), \ $(MAKEUP_DOXYGEN_DIR)/doxygen.$(d) \ doc_theme-$(d)) \ $(if $(wildcard $(top_srcdir)/.git), \ $(top_srcdir)/.gitignore) @+$(call DELEGATE_TARGET,makefile.utils,$@,Fatal) $(top_srcdir)/aclocal.m4 : $(top_srcdir)/configure.ac \ $(addprefix $(MAKEUP_AC_DIR)/, \ $(foreach t,$(PACKAGE_M4), \ $(if $(wildcard $(SYS_MAKEUP_AC_DIR)/$(t)), \ $(if $(wildcard $(MAKEUP_AC_DIR)/$(t)),,$(t)))))\ $(wildcard $(MAKEUP_AC_DIR)/*.m4) \ $(wildcard $(MAKEUP_CONF_M4_DIR)/*.m4) @+$(call DELEGATE_TARGET,makefile.utils,$@,Fatal) $(top_srcdir)/.gitignore : @+$(call DELEGATE_TARGET,makefile.utils,$@) $(MAKEUP_STAMP_DIR)/autoscan-report-stamp : $(top_srcdir)/configure.ac @+$(call DELEGATE_TARGET,makefile.utils,$@,$(call STAMP_Function,$@):;) endif # Build targets # ------------------------------------------------------------------ all : 
$(PACKAGE_TARGETS) fast : NUM_CPUS = $(shell getconf _NPROCESSORS_ONLN 2>/dev/null) fast : NJOBS = -j$(or $(NUM_CPUS),1) NJOBS="" fast : $(MAKE) $(NJOBS) -f $(THIS_MAKEFILE) all # Don't generate Makefile.interdeps until configure has been run. # The target makefiles may include substitutions that won't be available before that. ifeq ($(findstring update-interdeps,$(MAKECMDGOALS)),) $(if $(findstring Makefile.acsubst,$(MAKEFILE_LIST)), $(eval $(MORE_MAKEUP) Makefile.interdeps)) endif update-interdeps : EMPTY = update-interdeps : @echo 'include $(MAKEUP_CONFIG_DIR)/target.$$(TARGET_NAME)' > Makefile.makeinterdeps @echo >> Makefile.makeinterdeps @echo 'grep-deps: TARGET_DEPS = $$($$(TARGET_NAME)_TARGET_DEPS)'>> Makefile.makeinterdeps @echo 'grep-deps:' >> Makefile.makeinterdeps @echo ' @[ -z "$$(TARGET_DEPS)" ] || echo "$$(TARGET_NAME) : $$(TARGET_DEPS)" >> Makefile.interdeps' \ >> Makefile.makeinterdeps @echo >> Makefile.makeinterdeps @echo '.PHONY: grep-deps' >> Makefile.makeinterdeps @echo '# makeup generated boilerplate for $(PACKAGE_NAME) target interdependencies' > Makefile.interdeps @echo '# Do not edit, your changes will be lost.' >> Makefile.interdeps @echo >> Makefile.interdeps @echo " * Generating Makefile.interdeps" @for t in $(wildcard $(MAKEUP_CONFIG_DIR)/target.*); do \ $(MAKE) --no-print-directory -f Makefile.makeinterdeps \ TARGET_NAME="$${t##*target.}" \ grep-deps; \ done @$(RM) Makefile.makeinterdeps Makefile.interdeps : $(THIS_MAKEFILE) $(wildcard $(MAKEUP_CONFIG_DIR)/target.*) @$(MAKE) --no-print-directory -f $(THIS_MAKEFILE) update-interdeps # Delegation target. % : force-update @if [ -n "$(MAKEUP_VERBOSE)" ] ; then \ echo "==> $*"; \ fi; $(VERBOSE_ANNOUNCE) @+if [ "$*" != "$(THIS_MAKEFILE)" ]; then \ if [ -r "$(MAKEUP_CONFIG_DIR)/makefile.$*" ]; then \ $(call NEXT_GOAL_Function,$(MAKEUP_CONFIG_DIR)/makefile.$*) :;\ else \ $(call MAKEUP_FOR_TARGET,$*) \ fi; \ elif [ -n "$(MAKEUP_VERBOSE)" ] ; then \ echo "*** Not updating this makefile: $*"; \ fi .PHONY: all fast update-interdeps # Documentation # ------------------------------------------------------------------ MAKEUP_HELP += docs MAKEUP_HELP_BRIEF_docs := Generate documentation for your project. define MAKEUP_HELP_docs echo " The 'docs' target will generate all documentation for your project." echo " You can generate individual parts of the documentation by using" echo " 'make doc-'." echo echo " The default modules are:" echo echo " doc-source - for source browser documentation." echo " doc-user - for user level documentation." echo " doc-clean - purge all generated documentation files." echo echo " Package.conf settings:" echo " PACKAGE_DOCS = source user [ custom ... ]" echo " PACKAGE_DOC__THEME = [ parchment | striped ]" echo echo " If no theme is specified for a particular module, the doxygen default" echo " will be used. None of the above settings are mandatory." echo " If PACKAGE_DOCS is empty no docs will be generated by makeup." endef docs : @$(foreach d,$(PACKAGE_DOCS),$(call NEXT_GOAL_Function,$(THIS_MAKEFILE),doc-$(d))) # So here, we need to: # check if the doc theme exists locally. # check if it exists on the system # copy or update as required # create the new theme link. # run doxygen. # copy any theme files overwriting the generated ones. doc-% : SHELL = bash $(foreach d,$(PACKAGE_DOCS),doc-$(d)) : doc-% : $(top_builddir)/doc/doxyconf.% doc_theme-% $(VERBOSE_ANNOUNCE) @if [ -n "$(PACKAGE_DOC_$*_THEME)" ]; then \ cd $( - install an individual target." 
echo " install_lib- - install the runtime parts of a library target." echo " install_dev- - install the headers and build time parts of" echo " a library target. Equivalent to calling both" echo " of the following install_dev_* targets." echo " install_dev_arch-" echo " - install the platform dependent build time parts" echo " of a library target." echo " install_dev_indep-" echo " - install the headers and platform independent" echo " build time parts of a library target." echo " install_setup - install config headers." echo " install_i18n - install internationalisation files." endef # Install all default targets. install : @$(RM) "$(top_builddir)/__need_{ldconfig,setup}" @+$(call MAKEUP_FOR_EACH,$(PACKAGE_TARGETS),install) @if [ -e "$(top_builddir)/__need_setup" ]; then \ $(RM) "$(top_builddir)/__need_setup"; \ if [ -n "$(__PACKAGE_CONFIG_DIR)" ]; then \ echo; \ $(MAKE) --no-print-directory install_setup; \ fi; \ fi @if [ -e "$(top_builddir)/__need_ldconfig" ]; then \ $(RM) "$(top_builddir)/__need_ldconfig"; \ echo " ldconfig"; \ ldconfig; \ fi # Install individual targets. install-% : force-update @+$(call MAKEUP_FOR_TARGET,$*,install) @$(RM) "$(top_builddir)/__need_{ldconfig,setup}" # Install run time parts of a library target. install_lib-% : force-update @+$(call MAKEUP_FOR_TARGET,$*,install-lib) # Install headers and build time parts of a library target. # Equivalent to calling both install_dev_arch and install_dev_indep. install_dev-% : force-update @+$(call MAKEUP_FOR_TARGET,$*,install-dev) # Install platform dependent build time parts of a library target. install_dev_arch-% : force-update @+$(call MAKEUP_FOR_TARGET,$*,install-dev-arch) # Install headers and platform independent build time parts of a library target. install_dev_indep-% : force-update @+$(call MAKEUP_FOR_TARGET,$*,install-dev-indep) install_setup : @$(call CREATE_DIR_Function,$(DESTDIR)$(includedir)/$(__PACKAGE_CONFIG_DIR)) \ echo " Installing setup : $(PACKAGE_CONFIG_HEADER)"; \ $(INSTALL_DATA) include/$(PACKAGE_CONFIG_HEADER) \ $(DESTDIR)$(includedir)/$(__PACKAGE_CONFIG_DIR); \ echo " Installing setup : $(__PACKAGE_CONFIG_DIR)$(MAKEUP_FLAVOUR_HEADER)"; \ $(INSTALL_DATA) include/$(__PACKAGE_CONFIG_DIR)$(MAKEUP_FLAVOUR_HEADER) \ $(DESTDIR)$(includedir)/$(__PACKAGE_CONFIG_DIR); \ echo " Installing setup : $(__PACKAGE_CONFIG_DIR)$(MAKEUP_PLATFORM_HEADER)"; \ cp -d include/$(__PACKAGE_CONFIG_DIR)$(MAKEUP_PLATFORM_HEADER) \ $(DESTDIR)$(includedir)/$(__PACKAGE_CONFIG_DIR) install_i18n : @$(foreach mofile, \ $(foreach lingua,$(ALL_LINGUAS), \ $(wildcard locale/$(lingua)/LC_MESSAGES/*.mo)), \ $(call INSTALL_DATA_Function,$(mofile),$(DESTDIR)$(datadir), \ $(top_builddir),message catalogue)) # Clean targets # ------------------------------------------------------------------ MAKEUP_HELP += clean MAKEUP_HELP_BRIEF_clean := Clean project build directories. define MAKEUP_HELP_clean echo " Clean related targets are:" echo echo " clean - clean all default targets." echo " clean- - clean an individual target." echo " allclean- - clean an individual target and its dependencies." echo " extra-clean - clean all default and extra targets, including" echo " i18n message catalogues." echo " dist-clean - clean everything except the makeup boilerplate" echo " and configure script. 
After this you should" echo " still be able to ./configure && make" echo " release-clean - similar to dist-clean, but also removes all" echo " makeup files not essential to simply building" echo " the package, cleans all default and extra targets," echo " including i18n message catalogues." echo " repo-clean - clean everything that is regenerable if you have" echo " makeup installed on the system. After this you" echo " will need to run makeup to return to the dist-clean" echo " state." echo echo " makeup-dist-clean - you should never want to run this target on your" echo " own code, it is for removing everything *except*" echo " the makeup boilerplate when preparing makeup itself" echo " for distribution." endef # Clean individual targets. clean-% : force-update @+$(call MAKEUP_FOR_TARGET,$*,clean) # Clean individual targets and their dependencies allclean-% : force-update @+$(call MAKEUP_FOR_TARGET,$*,allclean) # Clean all default targets. clean : $(addprefix clean-,$(PACKAGE_TARGETS)) @$(RM) *.gcda *.gcno "$(top_builddir)/__need_{ldconfig,setup}" # Clean all default and extra targets, including i18n message catalogues, # and any files generated from acfile.* inputs. extra-clean : $(addprefix clean-,$(subst $(MAKEUP_CONFIG_DIR)/target.,, \ $(wildcard $(MAKEUP_CONFIG_DIR)/target.*))) @$(call REMOVE_FILES_Function, \ $(foreach f,$(subst $(MAKEUP_CONFIG_DIR)/acfile.,, \ $(wildcard $(MAKEUP_CONFIG_DIR)/acfile.*)),$(f)) \ ,"generated files") @$(call REMOVE_DIRS_Function, coverage $(wildcard coverage-*) \ ,"coverage report dirs") @$(RM) *.gcda *.gcno "$(top_builddir)/__need_{ldconfig,setup}" # Clean everything except the makeup boilerplate and configure script. # After this you should still be able to ./configure && make dist-clean : $(if $(IGNORE_TARGETS),,extra-clean doc-clean) @$(call REMOVE_FILES_Function, \ Makefile \ Makefile.acsubst \ Makefile.interdeps \ Makefile.makeinterdeps \ $(wildcard Makefile.acsubst.*) \ $(__PACKAGE_CONFIG_PUBLIC)-stamp \ $(__PACKAGE_CONFIG_PRIVATE)-stamp \ include/$(__PACKAGE_CONFIG_PRIVATE) \ include/$(PACKAGE_CONFIG_HEADER) \ $(patsubst %,include/$(__PACKAGE_CONFIG_DIR)%*_$(__PACKAGE_CONFIG_PUBLIC),\ linux bsd msw) \ $(addprefix config.,log status) \ $(top_srcdir)/$(__PACKAGE_CONFIG_PRIV_AH)~ \ $(top_builddir)/__need_ldconfig \ $(top_builddir)/__need_setup \ ,"generated files") @$(call REMOVE_DIRS_Function,$(top_srcdir)/autom4te.cache $(MAKEUP_STAMP_DIR) \ ,"build cruft dirs") @-$(call PRUNE_DIRS_Function,include/$(__PACKAGE_CONFIG_DIR)) # Similar to dist-clean, but also removes all makeup files not essential # to simply building the package. release-clean : dist-clean @$(call REMOVE_FILES_Function, \ $(addprefix $(MAKEUP_GMAKE_DIR)/makefile.,edit utils colour) \ ,"makeup maintainer utility files") @$(call REMOVE_DIRS_Function,$(MAKEUP_TEST_DIR),"test output") # Clean everything that is regenerable if you have makeup installed on # the system. After this you will need to run makeup to return to # the dist-clean state. 
repo-clean : dist-clean @$(call REMOVE_DIRS_Function,$(MAKEUP_AC_DIR),"autoconf fragments") @$(call REMOVE_FILES_Function, \ $(addprefix $(top_srcdir)/,aclocal.m4 \ $(__PACKAGE_CONFIG_PRIV_AH) \ configure configure.ac) \ $(MAKEUP_TOP_CONFIG) \ ,"regenerable autoconf") @$(call REMOVE_FILES_Function, \ $(addprefix $(MAKEUP_DOXYGEN_DIR)/doxygen.,source user) \ $(addprefix $(MAKEUP_DOXYGEN_DIR)/themes/,parchment.css striped.css) \ ,"doxygen boilerplate") @$(call REMOVE_DIRS_Function, \ $(addprefix $(MAKEUP_DOXYGEN_DIR)/themes/,parchment striped) \ ,"doxygen boilerplate themes") @$(if $(ALL_LINGUAS), \ $(call REMOVE_FILES_Function, \ $(foreach lingua,$(ALL_LINGUAS), \ $(wildcard locale/$(lingua)/LC_MESSAGES/*.mo)) \ ,"binary message catalogues") \ ) @$(call REMOVE_DIRS_Function, \ $(MAKEUP_AC_DIR) $(MAKEUP_AC_AUX_DIR) $(MAKEUP_GMAKE_DIR) \ ,"makeup boilerplate") @-echo " Pruning path to: $(MAKEUP_DIR)/doxygen/themes"; \ cd $(MAKEUP_DIR) && rmdir -p doxygen/themes @$(call REMOVE_DIRS_Function, \ $(MAKEUP_SWIG_WRAP_DIR) \ ,"generated swig wrappers") # You should never want to run the following target on your own code, # it is for removing everything *except* the makeup boilerplate when # preparing makeup itself for distribution. makeup-dist-clean : dist-clean @if [ "$(MAKEUP_DIST_CLEAN_PROMPT)" != "No" ]; then \ echo; \ echo "Executing this target will destroy all project configuration"; \ echo "and source files leaving only the Makeup boilerplate."; \ echo "If you really meant to call this, you can avoid this message"; \ echo "in the future by setting MAKEUP_DIST_CLEAN_PROMPT = No"; \ echo "If you didn't, now is your chance to back away slowly."; \ echo; \ echo -n "Really *DESTROY* existing project config ? (yes/N) "; \ read yesno; \ case $${yesno} in \ YES | yes) \ echo "Ok then, its your hard work..."; \ ;; \ *) \ echo "I thought not... Stopping"; \ exit 1; \ ;; \ esac; \ fi; @$(call REMOVE_FILES_Function,$(top_srcdir)/$(MAKEUP_TOP_CONFIG) \ ,"generated config") @$(call REMOVE_DIRS_Function, \ $(addprefix $(top_srcdir)/Makeup/,config stamp) \ ,"package config") @$(call PRUNE_DIRS_Function,include src) # Distribution targets # ------------------------------------------------------------------ # These are new options for 0.18, so fall back to some sensible defaults # if they aren't already set in Package.conf for some existing package. PACKAGE_DIST_TYPE ?= tar PACKAGE_DIST_COMPRESS ?= gz PACKAGE_BUILD_ROOTCMD ?= fakeroot # This one we don't usually expect to find defined in Package.conf, since the # files listed aren't actually in the distributed package. Two types of input # file are read to build the list of excluded files: # - dist-exclude.* # These files contain a simple list of (space or newline separated) paths to # files or directories that should not be included in distribution packages. # Shell globs may be used in this list. # - dist-exclude-override.* # These files are included directly as make syntax. They may be used for # manipulating the PACKAGE_DIST_EXCLUDE list in a more direct manner when # complex or rule based lists are desired. They are included after the list # of paths (if any) from dist-exclude.* files have been imported. Usually # they should contain just an assignment of some form to PACKAGE_DIST_EXCLUDE. PACKAGE_DIST_EXCLUDE = $(strip $(shell cat $(MAKEUP_CONFIG_DIR)/dist-exclude.* 2> /dev/null)) # Include any dist-exclude-override.* files without trying to update them. 
$(foreach f,$(wildcard $(MAKEUP_CONFIG_DIR)/dist-exclude-override.*),$(eval $(call JUST_INCLUDE,$(f)))) MAKEUP_HELP += dist MAKEUP_HELP_BRIEF_dist := Create a package for distribution. define MAKEUP_HELP_dist echo " Dist related targets are:" echo echo " dist - create a package using the default dist type." echo echo " tardist - create a tar.$(PACKAGE_DIST_COMPRESS) snapshot of the current working dir." echo " targzdist - create a tar.gz snapshot of the current working dir." echo " tarbz2dist - create a tar.bz2 snapshot of the current working dir." echo echo " gitdist - create a tar.$(PACKAGE_DIST_COMPRESS) snapshot of the git branch HEAD." echo " gitgzdist - create a tar.gz snapshot of the git branch HEAD." echo " gitbz2dist - create a tar.bz2 snapshot of the git branch HEAD." echo echo " debdist - create a Debian source package from the git branch HEAD." echo " debbuild - create a Debian binary package from the git branch HEAD." echo echo " The 'tardist' and 'gitdist' targets use the compressor defined by the" echo " parameter PACKAGE_DIST_COMPRESS. The 'dist' target default is defined" echo " by the value of PACKAGE_DIST_TYPE." echo echo " Package.conf parameters:" echo " PACKAGE_DIST_TYPE = [ tar | git | deb ] (currently $(PACKAGE_DIST_TYPE))" echo " PACKAGE_DIST_COMPRESS = [ gz | bz2 ] (currently $(PACKAGE_DIST_COMPRESS))" endef # $(eval $(call MAKEUP_TAR_DIST_Template,_tarflag,_suffix)) # This target will create a compressed tar archive. # If a tar archive is the default `make dist` type for the package, then the # list of files in PACKAGE_DIST_EXCLUDE will not be included in the archive. # # If the default dist type is git or deb, then it is assumed that this target # will not be used for public releases, but may be used as a convenience to # generate local snapshots for testing. In that case we generate a tarball # that includes everything in the current working dir, except git repo data # and any files removed by the release-clean target. define MAKEUP_TAR_DIST_Template tar$(2)dist: SHELL = bash tar$(2)dist: DISTNAME = $$(PACKAGE_NAME)-$$(PACKAGE_VERSION) tar$(2)dist: TARDIR = $$(top_srcdir)/../tar-packages tar$(2)dist: @if [ -z "$$(TARDIR)" -o -z "$$(DISTNAME)" ]; then \ echo "*** ERROR: bad dist dir:" \ "TARDIR='$$(TARDIR)' DISTNAME='$$(DISTNAME)'"; \ exit 1; \ fi mkdir -p $$(TARDIR) $(RM) -r $$(TARDIR)/$$(DISTNAME){,.tar.$(2)} cp -al $$(top_srcdir) $$(TARDIR)/$$(DISTNAME) @if [ "$$(strip $$(PACKAGE_DIST_TYPE))" = "tar" ] && \ [ -n "$$(strip $$(PACKAGE_DIST_EXCLUDE))" ]; then \ echo "$(RM) -r $$(addprefix $$(DISTNAME)/,$$(PACKAGE_DIST_EXCLUDE))"; \ $(RM) -r $$(addprefix $$(TARDIR)/$$(DISTNAME)/,$$(PACKAGE_DIST_EXCLUDE)); \ $(RM) "$$(TARDIR)/$$(DISTNAME)/$$(call SRCPATH_Function,$$(MAKEUP_CONFIG_DIR))/"dist-exclude*; \ fi cd $$(TARDIR)/$$(DISTNAME) && $$(MAKE) release-clean cd $$(TARDIR) && tar --exclude=.git --exclude=.gitignore \ --exclude=.gitattributes \ -$(1)cf $$(DISTNAME).tar.$(2) $$(DISTNAME) $(RM) -r $$(TARDIR)/$$(DISTNAME) @if [ "$$(strip $$(PACKAGE_DIST_TYPE))" != "tar" ] && \ [ -n "$$(strip $$(PACKAGE_DIST_EXCLUDE))" ]; then \ echo; \ echo " NOTE: Normally excluded files have not been removed";\ echo ' To make a public release use `make dist` instead';\ echo; \ fi endef # $(eval $(call MAKEUP_GIT_DIST_Template,_tarflag,_suffix,_compressor)) # This target will create a compressed tar archive. 
define MAKEUP_GIT_DIST_Template git$(2)dist: DISTNAME = $$(PACKAGE_NAME)-$$(PACKAGE_VERSION) git$(2)dist: GITDIR = $$(top_srcdir)/../git-packages git$(2)dist: @if [ -z "$$(GITDIR)" -o -z "$$(DISTNAME)" ]; then \ echo "*** ERROR: bad dist dir:" \ "GITDIR='$$(GITDIR)' DISTNAME='$$(DISTNAME)'"; \ exit 1; \ fi @if [ ! -d "$$(top_srcdir)/.git" ]; then \ echo "*** ERROR: not a git repository"; \ exit 1; \ fi mkdir -p $$(GITDIR) mkdir $$(GITDIR)/tmpdist $(RM) $$(GITDIR)/$$(DISTNAME).tar.$(2) @echo "git archive --format=tar --prefix=$$(DISTNAME)/ HEAD |" @( cd $$(top_srcdir) && \ git archive --format=tar --prefix=$$(DISTNAME)/ HEAD ) | \ ( cd $$(GITDIR)/tmpdist && tar xf - && \ if [ -n "$$(strip $$(PACKAGE_DIST_EXCLUDE))" ]; then \ echo "$(RM) -r $$(addprefix $$(DISTNAME)/,$$(PACKAGE_DIST_EXCLUDE))"; \ $(RM) -r $$(addprefix $$(DISTNAME)/,$$(PACKAGE_DIST_EXCLUDE)); \ $(RM) "$$(DISTNAME)/$$(call SRCPATH_Function,$$(MAKEUP_CONFIG_DIR))/"dist-exclude*; \ fi && \ ( cd $$(DISTNAME) && makeup unix-target-config project-release ) && \ echo "tar -$(1)cf ../$$(DISTNAME).tar.$(2) $$(DISTNAME)" && \ tar --exclude=.gitignore --exclude=.gitattributes \ -$(1)cf ../$$(DISTNAME).tar.$(2) $$(DISTNAME) ) $(RM) -r $$(GITDIR)/tmpdist endef $(eval $(call MAKEUP_TAR_DIST_Template,z,gz)) $(eval $(call MAKEUP_TAR_DIST_Template,j,bz2)) tardist: tar$(strip $(PACKAGE_DIST_COMPRESS))dist $(eval $(call MAKEUP_GIT_DIST_Template,z,gz,gzip)) $(eval $(call MAKEUP_GIT_DIST_Template,j,bz2,bzip2)) gitdist: git$(strip $(PACKAGE_DIST_COMPRESS))dist dist: $(strip $(PACKAGE_DIST_TYPE))dist # Only fetch this (once) if we really need it ifneq ($(filter deb%,$(MAKECMDGOALS)),) GET_DEB_VERSION = $(strip $(if $(wildcard $(1)/debian), \ $(shell cd $(1) && dpkg-parsechangelog \ | awk '/Version:/ {print $$2}'))) DEBVER := $(call GET_DEB_VERSION, $(top_srcdir)) endif debdist: SHELL = bash debdist: DISTNAME = $(PACKAGE_NAME)-$(PACKAGE_VERSION) debdist: DEBDIR = $(top_srcdir)/../deb-packages debdist: @if [ -z "$(DEBDIR)" -o -z "$(DISTNAME)" ]; then \ echo "*** ERROR: bad dist dir:" \ "DEBDIR='$(DEBDIR)' DISTNAME='$(DISTNAME)'"; \ exit 1; \ fi @if [ ! 
-d "$(top_srcdir)/debian" ]; then \ echo "*** ERROR: not debianised source"; \ exit 1; \ fi @if [ -z "$(DEBVER)" ]; then \ echo "*** ERROR: failed to get debian version"; \ exit 1; \ fi mkdir -p $(DEBDIR) $(RM) -r $(DEBDIR)/$(DISTNAME){,.orig} @echo "git archive --format=tar --prefix=$(DISTNAME)/ HEAD |"; @(cd $(top_srcdir) && \ git archive --format=tar --prefix=$(DISTNAME)/ HEAD ) | \ ( cd $(DEBDIR) && tar xf - && \ if [ -n "$(strip $(PACKAGE_DIST_EXCLUDE))" ]; then \ echo "$(RM) -r $(addprefix $(DISTNAME)/,$(PACKAGE_DIST_EXCLUDE))"; \ $(RM) -r $(addprefix $(DISTNAME)/,$(PACKAGE_DIST_EXCLUDE)); \ $(RM) "$(DISTNAME)/$(call SRCPATH_Function,$(MAKEUP_CONFIG_DIR))/"dist-exclude*; \ fi && \ ( cd $(DISTNAME) && \ for f in $$(find -name ".gitignore"); do $(RM) $$f; done &&\ makeup unix-target-config project-release && \ $(PACKAGE_BUILD_ROOTCMD) debian/rules clean ) && \ case $(DEBVER) in \ *-*) \ echo "dpkg-source -b -sR $(DISTNAME)"; \ cp -al $(DISTNAME) $(DISTNAME).orig \ && $(RM) -r $(DISTNAME).orig/debian \ && dpkg-source -b -sR $(DISTNAME) \ ;; \ *) \ echo "dpkg-source -b -sn $(DISTNAME)"; \ dpkg-source -b -sn $(DISTNAME) \ ;; \ esac ) debbuild: DISTNAME = $(PACKAGE_NAME)-$(PACKAGE_VERSION) debbuild: DEBDIR = $(top_srcdir)/../deb-packages debbuild: debdist cd $(DEBDIR)/$(DISTNAME) \ && dpkg-buildpackage -r$(strip $(PACKAGE_BUILD_ROOTCMD)) 2>&1 \ | tee ../build-$(PACKAGE_NAME)-$(DEBVER).log .PHONY: dist tardist targzdist tarbz2dist gitdist gitgzdist gitbz2dist \ debdist debbuild # Help targets # ------------------------------------------------------------------ $(eval $(MAKEUP_HELP_Template)) .PHONY : config targets sys-install source-doc user-doc allmo allpo tests \ install install_setup install_i18n clean extra-clean doc-clean \ dist-clean release-clean repo-clean makeup-dist-clean help force-update bit-babbler-0.9/Makeup/gmake-fragments/makefile.target0000644000000000000000000000760214125243667017713 0ustar # makeup target build rules # # Copyright 2003 - 2010, Ron # # This file is distributed under the terms of the GNU GPL version 2. # # As a special exception to the GPL, it may be distributed without # modification as a part of a program using a makeup generated build # system, under the same distribution terms as the program itself. ifeq ($(strip $(TARGET_NAME)),) $(error No TARGET_NAME for template) endif ifndef top_srcdir include Makefile.acsubst endif ifndef MAKEUP_DEFAULT_LINKAGE MAKEUP_DEFAULT_LINKAGE = unknown endif ifeq ($(origin EXEEXT),undefined) EXEEXT = {,.exe} endif ifeq ($(origin DSOEXT),undefined) DSOEXT = {.so,.dll} endif # Usually defined in makefile.acsubst. But may be overridden. ifndef MAKEUP_TOP_CONFIG MAKEUP_TOP_CONFIG = $(top_srcdir)/Makeup/Makeup.conf endif include $(MAKEUP_TOP_CONFIG) include $(MAKEUP_GMAKE_DIR)/makefile.fstools include $(MAKEUP_CONFIG_DIR)/target.$(TARGET_NAME) # FIXME: enable platform tests here... # Not really appropriate anymore, what do we need here? # merge makefile.unix, or move it to makefile.c-c++ ?? 
#ifeq platform, posix ifneq ($(strip $(MAKEUP_VERBOSE)),) include $(MAKEUP_GMAKE_DIR)/makefile.unix else -include $(MAKEUP_GMAKE_DIR)/makefile.unix endif #endif # Helper function to set a pattern specific vpath SET_VPATH_Template = vpath $(1) $(2) # Evaluate a target_VPATHS string of the form: # foo_VPATHS = %.cpp,/mysrc:/moresrc %.h,/myinc %.c,$(srcdir)/csrc # into a series of vpath statements like this: # vpath %.cpp /mysrc:/moresrc # vpath %.h /myinc # vpath %.c $(srcdir)/csrc # # The double eval splits each space-separated VPAIR on the commas. $(foreach VPAIR,$($(TARGET_NAME)_VPATHS), \ $(eval $$(eval $$(call SET_VPATH_Template,$(VPAIR))))) # GNU Make 3.80 fails to properly read an eval inside a conditional # Fortunately it can include a file with one just fine, or can still # be used conditionally with an extra level of indirection such as this. TARGET_RULES = $($($(TARGET_NAME)_TYPE)_Template) ifdef MAKEUP_HOST_ARCH ifdef $(TARGET_NAME)_ARCH ifeq ($(findstring ANY,$($(TARGET_NAME)_ARCH)),) ifeq ($(findstring $(MAKEUP_HOST_ARCH),$($(TARGET_NAME)_ARCH)),) ifeq ($(strip $(MAKECMDGOALS)),) define TARGET_RULES stop_processing : ; @echo " Not supported on this platform" .PHONY : stop_processing endef else define TARGET_RULES $(MAKECMDGOALS) : ; @echo " Not supported on this platform" .PHONY : $(MAKECMDGOALS) endef endif endif endif endif endif # We handle the KBUILD type here explicitly to avoid expanding the # GET_KBUILD_TARGETS_Template too often, we know that one really is # defined so save expanding TARGET_RULES until we use it below. ifneq ($($(TARGET_NAME)_TYPE),KBUILD) ifeq ($(strip $(TARGET_RULES)),) ifneq ($(findstring makefile.unix,$(MAKEFILE_LIST)),) $(error No TARGET_RULES for '$(TARGET_NAME)' of unknown type $($(TARGET_NAME)_TYPE)) endif endif endif $(eval $(call TARGET_RULES,$(TARGET_NAME))) $(eval $(UPDATE_MAKEUP_Template)) # We don't really need to update these unless someone tries to use them, # since that is only likely to happen by including them in a target.* # definition this seems like the best place for these rules at present. $(foreach f,$(subst $(MAKEUP_CONFIG_DIR)/acsubst.,, \ $(wildcard $(MAKEUP_CONFIG_DIR)/acsubst.*)),Makefile.acsubst.$(f)) : \ Makefile.acsubst.% : $(MAKEUP_CONFIG_DIR)/acsubst.% $(top_builddir)/config.status @echo -n " * Updating $@... " @cd $(top_builddir) && ./config.status $@ $(foreach f,$(subst $(MAKEUP_AC_DIR)/acsubst.,, \ $(wildcard $(MAKEUP_AC_DIR)/acsubst.*)),Makefile.acsubst.$(f)) : \ Makefile.acsubst.% : $(MAKEUP_AC_DIR)/acsubst.% $(top_builddir)/config.status @echo -n " * Updating $@... " @cd $(top_builddir) && ./config.status $@ # Targets we should not to attempt building here. Makefile.acsubst : ; %.d : ; %.cpp : ; bit-babbler-0.9/Makeup/gmake-fragments/makefile.unix0000644000000000000000000006766314125243667017425 0ustar # makeup build rules for posix platforms with the GNU toolchain. # # Copyright 2003 - 2021, Ron # # This file is distributed under the terms of the GNU GPL version 2. # # As a special exception to the GPL, it may be distributed without # modification as a part of a program using a makeup generated build # system, under the same distribution terms as the program itself. # XXX # Should this whole file just collapse into makefile.target? # If anyone else needs common bits it can be selectively split out later. # # XXX # We keep this independent from MAKEUP_VERBOSE for now, since they both add # kind of orthogonal types of extra noise to the output. 
MAKEUP_VERBOSE is # largely for debugging makeup itself, whereas this is more about debugging # the project that is being built. You may want both, but maybe not always. ifeq ($(V),) QUIET_CC = @echo " CC $@"; QUIET_CXX = @echo " C++ $@"; QUIET_SYMVERS = @echo " SYMVERS $@"; QUIET_LD = @echo " LD $@"; QUIET_AR = @echo " AR $@"; QUIET_SWIG = @echo " SWIG $@"; QUIET_LEX = @echo " LEX $@"; QUIET_YACC = @echo " YACC $*.tab.c"; QUIET_YACXX = @echo " YACC $*.tab.cpp"; QUIET_WINDRES = @echo " WINDRES $@"; # $(call QUIET_LN,_target,_linkname) QUIET_LN = @echo " LN $(2) -> $(1)"; $$(LN_S) -f $(1) $(2) else QUIET_LN = $$(LN_S) -f $(1) $(2) endif # Pattern rules for intermediate targets. # ------------------------------------------------------------------ %.tab.cpp %.tab.hpp : %.ypp $(QUIET_YACXX) $(YACC) $(YACCFLAGS) $< -o $*.tab.cpp %.yy.cpp : %.lpp %.tab.hpp $(QUIET_LEX) $(LEX) $(LEXFLAGS) -o$@ $< %.tab.c %.tab.h : %.y $(QUIET_YACC) $(YACC) $(YACCFLAGS) $< -o $*.tab.c %.yy.c : %.l %.tab.h $(QUIET_LEX) $(LEX) $(LEXFLAGS) -o$@ $< %_res.o : %.rc $(QUIET_WINDRES) $(WINDRES) -i $< -o $@ $(WINRCFLAGS) %.o : %.cpp $(QUIET_CXX) $(CXX) -c -MMD $(PICFLAGS) $(CPPFLAGS) $(CXXFLAGS) -o $@ $< %.o : %.c $(QUIET_CC) $(CC) -c -MMD $(PICFLAGS) $(CPPFLAGS) $(CFLAGS) -o $@ $< %.o : %.cc $(QUIET_CXX) $(CXX) -c -MMD $(PICFLAGS) $(CPPFLAGS) $(CXXFLAGS) -o $@ $< # Keep intermediate targets for things like bison/flex/gettext # they'll only trigger a false remake if we don't. .SECONDARY: # Target file rules. # ------------------------------------------------------------------ # The boilerplate macros here are divided into two preliminary # sets, MACRO_Function's and MACRO_Template's. Macro _Function's # are aliases for a set of commands in a rule. They are a make # friendly way to provide the equivalent of parametric shell # functions for common commands in makefile rules. # # Macro _Template's are parametric boilerplate for complete # makefile rules. They must be passed to GNU make's $(eval) # function to be parsed as rules for manipulating a target. # $(call _EXTRA_FLAGS_Template,_target,_flags) # Used to add target, platform, and link-type specific flags to EXTRA$(_flags). # Supported configuration permutations (if passed 'target' and 'XFLAGS') are: # target_XFLAGS # target_{ELF,PE}_XFLAGS # target_{static,shared}_XFLAGS # target_{static,shared}_{ELF,PE}_XFLAGS # # With options set for each being applied to EXTRAXFLAGS in that order. define _EXTRA_FLAGS_Template $$(strip $$($(1)_$(2)) \ $$($(1)_$$(MAKEUP_HOST_ARCH)_$(2)) \ $$(if $$($(1)_LINKAGE),$$($(1)_$$($(1)_LINKAGE)_$(2)) \ $$($(1)_$$($(1)_LINKAGE)_$$(MAKEUP_HOST_ARCH)_$(2)), \ $$($(1)_$$(MAKEUP_DEFAULT_LINKAGE)_$(2)) \ $$($(1)_$$(MAKEUP_DEFAULT_LINKAGE)_$$(MAKEUP_HOST_ARCH)_$(2))) \ ) endef # $(EXTRA_FLAGS_Template) # Defines target-specific variables common to all build platforms. # Usually used as a simple substitution variable inside another # template which defines the build rules to be used. 
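# A rough illustration of the permutations listed above (the target name 'foo'
# and the flag values are hypothetical):
#
#   foo_CPPFLAGS    = -DWITH_FOO
#   foo_ELF_LDFLAGS = -Wl,--as-needed
#   foo_static_LIBS = pthread
#
# would be folded into EXTRACPPFLAGS, EXTRALDFLAGS and EXTRALIBS (with the -l
# prefix added to LIBS entries) when building 'foo' on the matching platform
# and with the matching linkage.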
define EXTRA_FLAGS_Template $(1) : override EXTRACPPFLAGS += $(call _EXTRA_FLAGS_Template,$(1),CPPFLAGS) $(1) : override EXTRACFLAGS += $(call _EXTRA_FLAGS_Template,$(1),CFLAGS) $(1) : override EXTRACXXFLAGS += $(call _EXTRA_FLAGS_Template,$(1),CXXFLAGS) $(1) : override EXTRALDFLAGS += $(call _EXTRA_FLAGS_Template,$(1),LDFLAGS) $(1) : override EXTRAYACCFLAGS += $(call _EXTRA_FLAGS_Template,$(1),YACCFLAGS) $(1) : override EXTRALEXFLAGS += $(call _EXTRA_FLAGS_Template,$(1),LEXFLAGS) $(1) : override EXTRALIBS += $$(addprefix -l,$(call _EXTRA_FLAGS_Template,$(1),LIBS)) $(1) : $(1)_OBJS += $$($(1)_$$(MAKEUP_HOST_ARCH)_OBJS) $(1) : $$($(1)_$$(MAKEUP_HOST_ARCH)_OBJS) endef # $(EXECUTABLE_LINKAGE_Template) # Enables static executable linkage if specified by target_LINKAGE or # by MAKEUP_DEFAULT_LINKAGE if the target_LINKAGE parameter is unset. define EXECUTABLE_LINKAGE_Template $(1) : override EXTRALDFLAGS += $$(if $$(findstring static, $$($(1)_LINKAGE)), \ -static, \ $$(if $$($(1)_LINKAGE),, \ $$(if $$(findstring static, $$(MAKEUP_DEFAULT_LINKAGE)), \ -static))) endef C_LINKER = $(CC) C++_LINKER = $(CXX) # $(call LINK_ELF_EXECUTABLE_Template,_target,_linker) # Defines the makefile rules to link an executable _target for Linux. # Must be call'd with the name of the target executable and the _linker # to use (usually $(CC) or $(CXX) respectively for C and C++ targets). define LINK_ELF_EXECUTABLE_Template $(EXTRA_FLAGS_Template) $(EXECUTABLE_LINKAGE_Template) $(1) : PICFLAGS= $(1) : $$($(1)_OBJS) $$(QUIET_LD) $(2) $$(strip $$(LDFLAGS) $$(EXTRALDFLAGS)) -o $$@ $$($(1)_OBJS) $$(EXTRALIBS) $$(LIBS) -include $$($(1)_OBJS:.o=.d) endef # $(call LINK_PE_EXECUTABLE_Template,_target,_linker) # Defines the makefile rules to link an executable _target for mingw-cross. # Must be call'd with the name of the target executable and the _linker # to use (usually $(CC) or $(CXX) respectively for C and C++ targets). define LINK_PE_EXECUTABLE_Template $(EXTRA_FLAGS_Template) $(EXECUTABLE_LINKAGE_Template) $(1) : PICFLAGS= $(1) : $(1).exe $(1).exe : $$($(1)_OBJS) $$(QUIET_LD) $(2) $$(strip $$(LDFLAGS) $$(EXTRALDFLAGS)) -o $$@ $$($(1)_OBJS) $$(EXTRALIBS) $$(LIBS) -include $$($(1)_OBJS:.o=.d) .PHONY : $(1) endef # $(call LINK_staticLIBRARY_Template,_target,_linker) # Defines the makefile rules to link a static library _target. # Must be call'd with the base name of the target library and the _linker # to use (usually $(CC) or $(CXX) respectively for C and C++ targets). define LINK_staticLIBRARY_Template $(EXTRA_FLAGS_Template) $(1) : $(1).a $(1).a : $$($(1)_OBJS) ifneq ($$($(1)_OBJS),) $$(QUIET_AR) $$(AR) $$(ARFLAGS) $$@ $$($(1)_OBJS) else @echo " No $(1)_OBJS defined. Nothing to be done." endif -include $$($(1)_OBJS:.o=.d) .PHONY : $(1) endef # $(call LINK_sharedLIBRARY_Template,_target,_linker) # Defines the makefile rules to link a shared library _target. # Must be call'd with the base name of the target library and the _linker # to use (usually $(CC) or $(CXX) respectively for C and C++ targets). define LINK_sharedLIBRARY_Template $(call LINK_$(MAKEUP_HOST_ARCH)_sharedLIBRARY_Template,$(1),$(2)) endef # $(call LINK_ELF_sharedLIBRARY_Template,_target,_linker) # Defines the makefile rules to link a shared library _target for Linux. # Must be call'd with the base name of the target library and the _linker # to use (usually $(CC) or $(CXX) respectively for C and C++ targets). 
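# A sketch of what this produces for a LIBRARY target built with shared
# linkage, given these hypothetical values in its target file:
#
#   libfoo_API_VERSION  = 1
#   libfoo_API_REVISION = 2
#   libfoo_API_AGE      = 0
#
# With those, the library links as libfoo.so.1.2.0 with soname libfoo.so.1,
# and the libfoo.so.1 and libfoo.so convenience symlinks are created beside it.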
define LINK_ELF_sharedLIBRARY_Template $(EXTRA_FLAGS_Template) $(1) : $(1).so.$$($(1)_API_VERSION).$$($(1)_API_REVISION).$$($(1)_API_AGE) ifneq ($$($(1)_SYM_VERSION),) $(1).so.$$($(1)_API_VERSION).$$($(1)_API_REVISION).$$($(1)_API_AGE) : \ SYMVERS_SCRIPT = -Wl,--version-script,$(1).symvers endif $(1).so.$$($(1)_API_VERSION).$$($(1)_API_REVISION).$$($(1)_API_AGE) : $$($(1)_OBJS) $(1).symvers ifneq ($$($(1)_OBJS),) $$(QUIET_LD) $(2) $$(PICFLAGS) $$(LDFLAGS) $$(EXTRALDFLAGS) -shared -o $$@ \ -Wl,-soname,$(1).so.$$($(1)_API_VERSION) $$(SYMVERS_SCRIPT) \ $$($(1)_OBJS) $$(EXTRALIBS) $$(LIBS) $(call QUIET_LN,$$@,$(1).so.$$($(1)_API_VERSION)) $(call QUIET_LN,$$@,$(1).so) else @echo " No $(1)_OBJS defined. Nothing to be done." endif $(1).symvers : $$(MAKEUP_CONFIG_DIR)/target.$(1) ifneq ($$($(1)_SYM_VERSION),) $$(QUIET_SYMVERS) echo "$$($(1)_SYM_VERSION)" > $$@ else @: endif -include $$($(1)_OBJS:.o=.d) .PHONY : $(1) endef # $(call LINK_PE_sharedLIBRARY_Template,_target,_linker) # Defines the makefile rules to link a shared library _target for mingw-cross. # Must be call'd with the base name of the target library and the _linker # to use (usually $(CC) or $(CXX) respectively for C and C++ targets). define LINK_PE_sharedLIBRARY_Template $(EXTRA_FLAGS_Template) $(1) : $(1).dll $(1).dll : $$($(1)_PE_OBJS) $(1).dll : $$($(1)_OBJS) ifneq ($$($(1)_OBJS),) $$(QUIET_LD) $(2) $$(LDFLAGS) $$(EXTRALDFLAGS) -shared -o $$@ \ -Wl,--out-implib,$$@.a \ $$($(1)_OBJS) $$(EXTRALIBS) $$(LIBS) else @echo " No $(1)_OBJS defined. Nothing to be done." endif -include $$($(1)_OBJS:.o=.d) .PHONY : $(1) endef # $(call LINK_PLUGIN_Template,_target,_linker,_dllext) # Defines the makefile rules to link a shared library plugin _target. # Must be call'd with the base name of the target plugin, the _linker # to use (usually $(CC) or $(CXX) respectively for C and C++ targets), # and the file extension for plugins (usually '.so' for ELF and '.dll' # for PE plugins). define LINK_PLUGIN_Template $(EXTRA_FLAGS_Template) $(1) : $(1)$(3) $(1)$(3) : PICFLAGS=$(HOST_PICFLAGS) $(1)$(3) : $$($(1)_OBJS) $$(QUIET_LD) $(2) $$(PICFLAGS) $$(LDFLAGS) $$(EXTRALDFLAGS) -shared -o $$@ \ $$($(1)_OBJS) $$(EXTRALIBS) $$(LIBS) -include $$($(1)_OBJS:.o=.d) .PHONY : $(1) endef # Binding and language specific parameters for SWIGWRAP_Template SWIGWRAP_perl_EXTRACPPFLAGS = -I$(shell perl -e 'use Config; print $$Config{archlib};')/CORE SWIGWRAP_python_EXTRACPPFLAGS = -I$(shell python -c "import sys;print sys.prefix + '/include/python' + sys.version[:3]") SWIGWRAP_C++_EXT = cpp SWIGWRAP_C_EXT = c SWIGWRAP_C++_PARSER = -c++ SWIGWRAP_C_PARSER = # $(call SWIGWRAP_Template,_target,_module,_binding,_language,_prefix) # Defines the common rules for creating swig modules. The _target parameter # is a convenience and should normally equal $_binding-$_module. The _module # is the name of the swig module to build, _binding is the language that the # module will support, _language is what it is written in (C or C++), the # optional _prefix will be prepended to the compiled module name, and is # currently only required for python's particular world view. 
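# A minimal sketch of a call (the 'widget' module name is hypothetical):
#
#   $(call SWIGWRAP_Template,python-widget,widget,python,C++,_)
#
# which generates $(MAKEUP_SWIG_WRAP_DIR)/python-widget_wrap.cpp from
# $(MAKEUP_SWIG_IF_DIR)/widget.i, compiles it, and links the
# python/_widget$(DSOEXT) module.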
define SWIGWRAP_Template $(EXTRA_FLAGS_Template) $(1) : $(3)/$(5)$(2)$$(DSOEXT) $(1)_wrap.o : EXTRACPPFLAGS += $$(SWIGWRAP_$(3)_EXTRACPPFLAGS) $(1)_wrap.o : $$(MAKEUP_SWIG_WRAP_DIR)/$(1)_wrap.$$(SWIGWRAP_$(4)_EXT) $$(QUIET_CXX) $$(CXX) -c -MMD $$(PICFLAGS) $$(CPPFLAGS) $$(CXXFLAGS) -o $$@ $$< $(3)/$(5)$(2)$$(DSOEXT) : $$($(1)_OBJS) $(1)_wrap.o @mkdir -p $$(@D) $$(QUIET_LD) $$($(4)_LINKER) $$(PICFLAGS) $$(LDFLAGS) $$(EXTRALDFLAGS) -shared -o $$@ \ $$^ $$(EXTRALIBS) $$(LIBS) $$(SWIGWRAP_$(1)_EXTRA) $$(MAKEUP_SWIG_WRAP_DIR)/$(1)_wrap.$$(SWIGWRAP_$(4)_EXT) : $$(MAKEUP_SWIG_IF_DIR)/$(2).i $(if $(SWIG), mkdir -p $$(@D) $$(QUIET_SWIG) $(SWIG) -$(3) $$(SWIGWRAP_$(4)_PARSER) -o $$@ $$<, @echo "*** swig is not installed, cannot regenerate $(1) wrappers") $$(MAKEUP_SWIG_IF_DIR)/$(2).i : ; -include $$($(1)_OBJS:.o=.d) $(1)_wrap.d .PHONY : $(1) clean_$(1) : $$(MAKEUP_SWIG_WRAP_DIR)/$(1)_wrap.$$(SWIGWRAP_$(4)_EXT) clean_$(1) : CLEAN_$(1)_EXTRA = $$(if $$(wildcard $(3)), \ echo "\ \ \ \ Pruning path to: $(3)"; \ rmdir -p $(3) || true;) endef # $(call SWIG_perl_Template,_target,_module) # Defines all the rules required for a swig perl module _target. # The second parameter is merely passed as a convenience, it is not anticipated # this should be called with $_target != perl-$_module, or by anything other # than the SWIG_Template delegator. define SWIG_perl_Template SWIGWRAP_$(1)_EXTRA = $$(LN_S) -f \ ../$$(patsubst ./%,%,$$(MAKEUP_SWIG_WRAP_DIR))/$(2).pm \ perl $(call SWIGWRAP_Template,$(1),$(2),perl,$$($(1)_LANGUAGE)) install_vendor_$(1) : PERLARCH = $(shell perl -e 'use Config; print $$Config{installvendorarch};') install_vendor_$(1) : PERLLIB = $(shell perl -e 'use Config; print $$Config{installvendorlib};') install_vendor_$(1) : install_$(1) install_site_$(1) : PERLARCH = $(shell perl -e 'use Config; print $$Config{installsitearch};') install_site_$(1) : PERLLIB = $(shell perl -e 'use Config; print $$Config{installsitelib};') install_site_$(1) : install_$(1) install_$(1) : @$(call INSTALL_DATA_Template_Function,$(2).pm,$$(DESTDIR)$$(PERLARCH),perl,perl interface) @$(call INSTALL_DATA_Template_Function,$(2)$$(DSOEXT),$$(DESTDIR)$$(PERLLIB)/auto/$(2),perl,perl module) install : $$(if $$(shell test "$$(prefix)" = "/usr" || echo "no"), \ install_site_$(1), install_vendor_$(1)) .PHONY : install_vendor_$(1) install_site_$(1) install_$(1) install SWIGWRAP_CLEANUP_FILES = perl/$(2){$$(DSOEXT),.pm} \ $(1)_wrap.{o,d} $(call CLEAN_Template,$(1),$$(SWIGWRAP_CLEANUP_FILES)) endef # $(call SWIG_python_Template,_target,_module) # Defines all the rules required for a swig python module _target. # The second parameter is merely passed as a convenience, it is not anticipated # this should be called with $_target != python-$_module, or by anything other # than the SWIG_Template delegator. 
define SWIG_python_Template SWIGWRAP_$(1)_EXTRA = $$(LN_S) -f \ ../$$(patsubst ./%,%,$$(MAKEUP_SWIG_WRAP_DIR))/$(2).py \ python $(call SWIGWRAP_Template,$(1),$(2),python,$$($(1)_LANGUAGE),_) install_vendor_$(1) : PYARCH = /usr/lib/python$$(PYVERS) install_vendor_$(1) : PYLIB = /usr/lib/python$$(PYVERS)/lib-dynload install_vendor_$(1) : install_$(1) install_site_$(1) : PYARCH = /usr/local/lib/python$$(PYVERS)/site-packages install_site_$(1) : PYLIB = $$(PYARCH) install_site_$(1) : install_$(1) install_$(1) : PYVERS = $(shell python -c "import sys;print sys.version[:3]") install_$(1) : @$(call INSTALL_DATA_Template_Function,$(2).py,$$(DESTDIR)$$(PYARCH),python,python interface) @$(call INSTALL_DATA_Template_Function,_$(2)$$(DSOEXT),$$(DESTDIR)$$(PYLIB),python,python module) install : $$(if $$(shell test "$$(prefix)" = "/usr" || echo "no"), \ install_site_$(1), install_vendor_$(1)) .PHONY : install_vendor_$(1) install_site_$(1) install_$(1) install SWIGWRAP_CLEANUP_FILES = python/_$(2)$$(DSOEXT) \ python/$(2).py{,c} \ $(1)_wrap.{o,d} $(call CLEAN_Template,$(1),$$(SWIGWRAP_CLEANUP_FILES)) endef # $(CREATE_INSTALLDIR_Function) # Simple helper alias for the INSTALL_ templates. Used to create the # target's install directory if required. define CREATE_INSTALLDIR_Function $(call CREATE_DIR_Function,$$(DESTDIR)$$($(1)_INSTALLDIR)) endef # $(call INSTALL_BINARY_Template,_target[,_type][,_binary]) # Defines makefile install rules for _target. An optional (informative) # _type may be supplied for output to the user at install time. The _binary # parameter is also optional, it is the name of the file that will be # installed, defaulting to _target if omitted. define INSTALL_BINARY_Template install_$(1) : ifneq ($$($(1)_INSTALLDIR),) @set -e; \ $(CREATE_INSTALLDIR_Function) \ bin_name=$(3); \ if [ -z "$(3)" ]; then \ bin_name=$(1); \ fi; \ echo " Installing $(2): $$$${bin_name}"; \ $$(INSTALL_PROGRAM) $$$${bin_name} $$(DESTDIR)$$($(1)_INSTALLDIR) else @echo "$$@: No $(1)_INSTALLDIR defined, not installing" endif install : install_$(1) .PHONY : install_$(1) install endef # $(INSTALL_HEADER_Function) # Simple helper alias for INSTALL_DATA_Template_Function, used by # library templates to install the header files for their target. define INSTALL_HEADER_Function ifneq ($$($(1)_HEADERS),) ifneq ($$($(1)_HEADER_SRCDIR),) ifneq ($$($(1)_HEADER_INSTALLDIR),) @$(call INSTALL_DATA_Template_Function,$$($(1)_HEADERS),$$(DESTDIR)$$($(1)_HEADER_INSTALLDIR),$$($(1)_HEADER_SRCDIR),header) else @echo "$$@: No $(1)_HEADER_INSTALLDIR defined, not installing headers"; endif else @echo "$$@: No $(1)_HEADER_SRCDIR defined, not installing headers"; endif endif endef # $(INSTALL_configure_AC_FRAGMENT_Function) # Simple helper alias for INSTALL_DATA_Template_Function, used by library # templates to install any makeup configure ac-fragments for their target. # It is expected that a 'configure.$label' file will exist in the local # config dir for each $label listed in $target_AC_FRAGMENTS define INSTALL_configure_AC_FRAGMENT_Function $(call INSTALL_DATA_Template_Function,$$(addprefix configure.,$$($(1)_AC_FRAGMENTS)),$$(DESTDIR)$$(datadir)/makeup/ac-fragments,$$(MAKEUP_CONFIG_DIR),ac-fragment) endef # $(INSTALL_acsubst_AC_FRAGMENT_Function) # Simple helper alias for INSTALL_DATA_Template_Function, used by library # templates to install any makeup acsubst ac-fragments for their target. # If an 'acsubst.$label' file exists in the local config dir for any entry # in $target_AC_FRAGMENTS it will also be installed. 
This file is expected # to contain makefile substitution variables supported by configure.$label # and is not required to exist if there no substitution variable are needed. define INSTALL_acsubst_AC_FRAGMENT_Function $(call INSTALL_DATA_Template_Function,$$(subst $$(MAKEUP_CONFIG_DIR)/,,$$(wildcard $$(addprefix $$(MAKEUP_CONFIG_DIR)/acsubst.,$$($(1)_AC_FRAGMENTS)))),$$(DESTDIR)$$(datadir)/makeup/ac-fragments,$$(MAKEUP_CONFIG_DIR),ac-fragment) endef # $(INSTALL_AC_M4_Function) # Simple helper alias for INSTALL_DATA_Template_Function, used by # library templates to install any autoconf .m4 files for their target. define INSTALL_AC_M4_Function $(call INSTALL_DATA_Template_Function,$$(addsuffix .m4,$$($(1)_AC_M4)),$$(DESTDIR)$$(datadir)/aclocal,$$(MAKEUP_CONF_M4_DIR),ac.m4) endef # $(INSTALL_LIBRARY_Template) # Defines install rules for a library target. define INSTALL_LIBRARY_Template $(INSTALL_$(MAKEUP_DEFAULT_LINKAGE)LIBRARY_Template) install : need-setup-header need-setup-header : @touch "$$(top_builddir)/__need_setup" .PHONY : install-lib_$(1) install-dev-arch_$(1) install-dev-indep_$(1) \ install-lib install-dev install-dev-arch install-dev-indep install \ need-setup-header need-ldconfig endef # $(INSTALL_staticLIBRARY_Template) # Defines install rules for a static library target. define INSTALL_staticLIBRARY_Template $(call INSTALL_BINARY_Template,$(1),static library,$(1).a) install-dev_$(1) : $(INSTALL_HEADER_Function) install : install-dev install-lib : ; @: install-dev : install_$(1) install-dev-arch_$(1) install-dev-indep_$(1) install-dev-arch : install_$(1) install-dev-arch_$(1) install-dev-indep : install-dev-indep_$(1) endef # $(INSTALL_sharedLIBRARY_Template) # Defines install rules for a shared library target. define INSTALL_sharedLIBRARY_Template $(INSTALL_$(MAKEUP_HOST_ARCH)_sharedLIBRARY_Template) install : install-lib install-dev install-lib : install-lib_$(1) install-dev : install-dev-arch_$(1) install-dev-indep_$(1) install-dev-arch : install-dev-arch_$(1) install-dev-indep : install-dev-indep_$(1) endef # $(INSTALL_ELF_sharedLIBRARY_Template) # Defines makefile install rules for a Linux shared library target. define INSTALL_ELF_sharedLIBRARY_Template ifneq ($$($(1)_INSTALLDIR),) install : need-ldconfig install-lib_$(1) : @set -e; \ if test x"$$($(1)_OBJS)" != x; then \ $(CREATE_INSTALLDIR_Function) \ echo " Installing: $(1).so.$$($(1)_API_VERSION).$$($(1)_API_REVISION).$$($(1)_API_AGE)"; \ $$(INSTALL_PROGRAM) \ $(1).so.$$($(1)_API_VERSION).$$($(1)_API_REVISION).$$($(1)_API_AGE) \ $$(DESTDIR)$$($(1)_INSTALLDIR); \ echo " Installing: $(1).so.$$($(1)_API_VERSION)"; \ cp -d $(1).so.$$($(1)_API_VERSION) $$(DESTDIR)$$($(1)_INSTALLDIR); \ fi install-dev-arch_$(1) : @set -e; \ if test x"$$($(1)_OBJS)" != x; then \ $(CREATE_INSTALLDIR_Function) \ echo " Installing: $(1).so"; \ cp -d $(1).so $$(DESTDIR)$$($(1)_INSTALLDIR); \ fi need-ldconfig : @[ -n "$$(DESTDIR)" ] || touch "$$(top_builddir)/__need_ldconfig" else install-lib_$(1) \ install-dev-arch_$(1) : @echo "$$@: No $(1)_INSTALLDIR defined, not installing" endif install-dev-indep_$(1) : $(INSTALL_HEADER_Function) @$(INSTALL_configure_AC_FRAGMENT_Function) @$(INSTALL_acsubst_AC_FRAGMENT_Function) @$(INSTALL_AC_M4_Function) endef # $(INSTALL_PE_sharedLIBRARY_Template) # Defines makefile install rules for a mingw-cross shared library target. 
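In practice, INSTALL_ELF_sharedLIBRARY_Template above splits the files between the runtime and development install targets; headers, ac-fragments and .m4 files go through the architecture-independent install-dev-indep rule and are not shown here. For the same hypothetical libfoo, with /usr/lib standing in for $(libfoo_INSTALLDIR), the generated rules amount to:

# install-lib: the runtime pieces only.
mkdir -p "$DESTDIR/usr/lib"
install -m 755 libfoo.so.1.2.3 "$DESTDIR/usr/lib"
cp -d libfoo.so.1 "$DESTDIR/usr/lib"   # copy the symlink itself, not its target

# install-dev-arch: the unversioned link used by compile-time -lfoo lookups.
cp -d libfoo.so "$DESTDIR/usr/lib"

# Installs onto the running system (empty DESTDIR) are flagged so the linker
# cache can be refreshed afterwards; package builds leave that to the
# packaging tools. $top_builddir is a placeholder for the make variable.
[ -n "$DESTDIR" ] || touch "$top_builddir/__need_ldconfig"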
define INSTALL_PE_sharedLIBRARY_Template ifneq ($$($(1)_INSTALLDIR),) install-lib_$(1) : @set -e; \ if test x"$$($(1)_OBJS)" != x; then \ $(CREATE_INSTALLDIR_Function) \ echo " Installing: $(1).dll"; \ $$(INSTALL_PROGRAM) $(1).dll $$(DESTDIR)$$($(1)_INSTALLDIR); \ fi install-dev-arch_$(1) : @set -e; \ if test x"$$($(1)_OBJS)" != x; then \ $(CREATE_INSTALLDIR_Function) \ echo " Installing: $(1).dll.a"; \ $$(INSTALL_PROGRAM) $(1).dll.a $$(DESTDIR)$$($(1)_INSTALLDIR); \ fi else install-lib_$(1) \ install-dev-arch_$(1) : @echo "$$@: No $(1)_INSTALLDIR defined, not installing" endif install-dev-indep_$(1) : $(INSTALL_HEADER_Function) @$(INSTALL_configure_AC_FRAGMENT_Function) @$(INSTALL_acsubst_AC_FRAGMENT_Function) @$(INSTALL_AC_M4_Function) endef # $(CLEAN_staticLIBRARY_Template) # Computed helper alias for CLEAN_Template for static library targets. define CLEAN_staticLIBRARY_Template $(call CLEAN_Template,$(1),$(1).a) endef # $(CLEAN_sharedLIBRARY_Template) # Computed helper alias for CLEAN_Template for shared library targets. define CLEAN_sharedLIBRARY_Template $(call CLEAN_$(MAKEUP_HOST_ARCH)_sharedLIBRARY_Template,$(1)) endef # $(CLEAN_ELF_sharedLIBRARY_Template) # Computed helper alias for CLEAN_Template for ELF shared library targets. define CLEAN_ELF_sharedLIBRARY_Template CLEAN_ELF_sharedLIBRARY_SUFFIXES := {so*,symvers} $(call CLEAN_Template,$(1),$(1).$$(CLEAN_ELF_sharedLIBRARY_SUFFIXES)) endef # $(CLEAN_PE_sharedLIBRARY_Template) # Computed helper alias for CLEAN_Template for PE shared library targets. define CLEAN_PE_sharedLIBRARY_Template $(call CLEAN_Template,$(1),$(1).dll*) endef # $(CLEAN_unknownLIBRARY_Template) # Computed helper alias for CLEAN_Template for unconfigured library targets. define CLEAN_unknownLIBRARY_Template CLEAN_unknownLIBRARY_SUFFIXES := {a,dll*,so*,symvers} $(call CLEAN_Template,$(1),$(1).$$(CLEAN_unknownLIBRARY_SUFFIXES)) endef # $(call CLEAN_Template,_target[,_files]) # Defines makefile clean rules for _target. The _files parameter is an # optional (space-separated) list of filenames to remove in addition to # the standard build prerequisites. It defaults to the value of _target # if omitted. # # Note: we eval ${target_files} so that it may contain (and correctly # evaluate) shell brace expansions. eg. libfoo.{a,so*,dll*} define CLEAN_Template clean_$(1) : SHELL = bash clean_$(1) : BISON_SRC := $$(patsubst %.tab.o,%.y,$$(filter %.tab.o,$$($(1)_OBJS))) clean_$(1) : FLEX_SRC := $$(patsubst %.yy.o,%.l,$$(filter %.yy.o,$$($(1)_OBJS))) clean_$(1) : $(1)_OBJS += $$($(1)_$$(MAKEUP_HOST_ARCH)_OBJS) clean_$(1) : @target_files="$(2)"; \ if [ -z "$(2)" ]; then \ target_files=$(1); \ fi; \ for f in $$($(1)_OBJS) $$($(1)_OBJS:.o=.d) core \ $$(BISON_SRC:.y=.tab.c) $$(BISON_SRC:.y=.tab.h) \ $$(BISON_SRC:.y=.tab.cpp) $$(BISON_SRC:.y=.tab.hpp) \ $$(BISON_SRC:.y=.tab.d) \ $$(FLEX_SRC:.l=.yy.c) $$(FLEX_SRC:.l=.yy.cpp) \ $$(FLEX_SRC:.l=.yy.d) \ $$($(1)_EXTRACLEAN); \ do \ if [ -e "$$$$f" ]; then \ target_files="$$$${target_files} $$$$f"; \ fi; \ done; \ echo " Removing: $$$${target_files}"; \ eval $$(RM) $$$${target_files} @eval $$(CLEAN_$(1)_EXTRA) allclean_$(1) : clean_$(1) ifneq ($$($(1)_TARGET_DEPS),) @$(MAKE) $(MAKEUP_EXTRA_DEBUG) --no-print-directory \ $$(foreach t,$$($(1)_TARGET_DEPS),clean-$$(t)) else @ endif clean : clean_$(1) allclean : allclean_$(1) .PHONY : clean_$(1) clean allclean endef # $(call EXECUTABLE_Template,_target) # Defines all the rules required for an executable _target. 
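The SHELL = bash and eval pairing in CLEAN_Template above exists because the _files argument may carry brace patterns such as libfoo.{a,so*,symvers}, and those only expand when bash re-parses the already-expanded word; plain sh would not expand braces at all, hence the per-rule SHELL override. A minimal illustration, with placeholder file names:

target_files='libfoo.{a,so*,symvers}'

# Brace expansion happens before parameter expansion, so expanding the
# variable directly leaves the braces literal and removes nothing useful.
rm -f $target_files

# eval feeds the expanded text back through the parser, so bash applies
# brace and glob expansion and catches libfoo.a, libfoo.so, libfoo.so.1.2.3
# and libfoo.symvers in one go.
eval rm -f $target_files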
define EXECUTABLE_Template $(call LINK_$(MAKEUP_HOST_ARCH)_EXECUTABLE_Template,$(1),$$($$($(1)_LANGUAGE)_LINKER)) $(call INSTALL_BINARY_Template,$(1),executable,$(1)$$(EXEEXT)) $(call CLEAN_Template,$(1),$(1)$$(EXEEXT)) endef # $(call LIBRARY_Template,_target) # Defines all the rules required for a library _target. define LIBRARY_Template $(call LINK_$(MAKEUP_DEFAULT_LINKAGE)LIBRARY_Template,$(1),$$($$($(1)_LANGUAGE)_LINKER)) $(INSTALL_LIBRARY_Template) $(CLEAN_$(MAKEUP_DEFAULT_LINKAGE)LIBRARY_Template) endef # $(call PLUGIN_Template,_target) # Defines all the rules required for a plugin _target. define PLUGIN_Template $(call LINK_PLUGIN_Template,$(1),$$($$($(1)_LANGUAGE)_LINKER),$$(DSOEXT)) $(call INSTALL_BINARY_Template,$(1),plugin,$(1)$$(DSOEXT)) $(call CLEAN_Template,$(1),$(1)$$(DSOEXT)) endef # $(call DATA_Template,_target) # Defines all the rules required for a 'data' _target. define DATA_Template $(1) : ; @: install_$(1) : ifneq ($$($(1)_DATA_FILES),) ifneq ($$($(1)_DATA_SRCDIR),) ifneq ($$($(1)_DATA_INSTALLDIR),) @$(call INSTALL_DATA_Template_Function,$$($(1)_DATA_FILES),$$(DESTDIR)$$($(1)_DATA_INSTALLDIR),$$($(1)_DATA_SRCDIR),$(1)) else @echo "$$@: No $(1)_DATA_INSTALLDIR defined, not installing" endif else @echo "$$@: No $(1)_DATA_SRCDIR defined, not installing" endif endif install : install_$(1) clean_$(1) : ; @: clean : clean_$(1) .PHONY : $(1) install_$(1) clean_$(1) install clean endef # $(call SWIG_Template,_target) # Defines all the rules required for a swig module _target. # Such targets are of the form $binding-$modulename, eg. perl-foo, python-bar # This macro simply delegates to the binding language specific rules for _target define SWIG_Template # This comment just ensures this macro never tests empty when it is defined $(eval SWIG_WRAPPER_TYPE = $(shell t="$(1)"; echo "$${t%%-*}")) $(if $(1), $(if $(SWIG_$(SWIG_WRAPPER_TYPE)_Template),, \ $(error Unknown SWIG target type '$(SWIG_WRAPPER_TYPE)'))) $(call SWIG_$(SWIG_WRAPPER_TYPE)_Template,$(1),$(shell t="$(1)"; echo "$${t#*-}")) endef # $(call KBUILD_MAKE_Function,_target,_rule) # Convenience helper for KBUILD_Template to check if the target dir exists # before attempting to invoke a kbuild rule for it. define KBUILD_MAKE_Function $(if $(KBUILD_TARGET_DIR_$(1)),$(MAKE) -C $(KBUILD_TARGET_DIR_$(1)) $(2), \ @echo "No dir for $(1) target $(2)") endef # $(call KBUILD_Template,_target) # Defines the rules for building a kernel module using kbuild. Unfortunately # kbuild cannot currently support the concept of a separate build dir, so these # builds will always result in the target module and its precursors ending up # in the source dirs. (it has a O= option, but that works about as well as it # is documented, so I'm not optimistic this will be fixed soon) # # The $(MAKEUP_CONFIG_DIR)/target.$(_target) file should contain the required # kbuild definitions as well as the makup target description. Only $target_DIR # is supported for this type. It is required to indicate the subdir which # contains the kernel module source. define KBUILD_Template $(GET_KBUILD_TARGETS_Template) $(1) : $(KBUILD_TARGET_DIR_$(1))/Makefile +$(call KBUILD_MAKE_Function,$(1)) $(KBUILD_TARGET_DIR_$(1))/Makefile : $$(MAKEUP_CONFIG_DIR)/target.$(1) \ $$(MAKEUP_GMAKE_DIR)/makefile.kbuild \ $$(top_builddir)/config.status $(if $(KBUILD_TARGET_DIR_$(1)), \ @echo -n " * Updating $$@... 
" && \ cd $$(top_builddir) && ./config.status $$@, \ @echo "No dir for $(1), not updating Makefile") install_$(1) : +$(call KBUILD_MAKE_Function,$(1),install) clean_$(1) : -+$(call KBUILD_MAKE_Function,$(1),clean) install : install_$(1) clean : clean_$(1) .PHONY : $(1) install_$(1) clean_$(1) install clean endef bit-babbler-0.9/aclocal.m40000644000000000000000000041201514136173163012265 0ustar # generated automatically by aclocal 1.16.3 -*- Autoconf -*- # Copyright (C) 1996-2020 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) # gettext.m4 serial 71 (gettext-0.20.2) dnl Copyright (C) 1995-2014, 2016, 2018-2020 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl dnl This file can be used in projects which are not available under dnl the GNU General Public License or the GNU Lesser General Public dnl License but which still want to provide support for the GNU gettext dnl functionality. dnl Please note that the actual code of the GNU gettext library is covered dnl by the GNU Lesser General Public License, and the rest of the GNU dnl gettext package is covered by the GNU General Public License. dnl They are *not* in the public domain. dnl Authors: dnl Ulrich Drepper , 1995-2000. dnl Bruno Haible , 2000-2006, 2008-2010. dnl Macro to add for using GNU gettext. dnl Usage: AM_GNU_GETTEXT([INTLSYMBOL], [NEEDSYMBOL], [INTLDIR]). dnl INTLSYMBOL must be one of 'external', 'use-libtool'. dnl INTLSYMBOL should be 'external' for packages other than GNU gettext, and dnl 'use-libtool' for the packages 'gettext-runtime' and 'gettext-tools'. dnl If INTLSYMBOL is 'use-libtool', then a libtool library dnl $(top_builddir)/intl/libintl.la will be created (shared and/or static, dnl depending on --{enable,disable}-{shared,static} and on the presence of dnl AM-DISABLE-SHARED). dnl If NEEDSYMBOL is specified and is 'need-ngettext', then GNU gettext dnl implementations (in libc or libintl) without the ngettext() function dnl will be ignored. If NEEDSYMBOL is specified and is dnl 'need-formatstring-macros', then GNU gettext implementations that don't dnl support the ISO C 99 formatstring macros will be ignored. dnl INTLDIR is used to find the intl libraries. If empty, dnl the value '$(top_builddir)/intl/' is used. dnl dnl The result of the configuration is one of three cases: dnl 1) GNU gettext, as included in the intl subdirectory, will be compiled dnl and used. dnl Catalog format: GNU --> install in $(datadir) dnl Catalog extension: .mo after installation, .gmo in source tree dnl 2) GNU gettext has been found in the system's C library. dnl Catalog format: GNU --> install in $(datadir) dnl Catalog extension: .mo after installation, .gmo in source tree dnl 3) No internationalization, always use English msgid. dnl Catalog format: none dnl Catalog extension: none dnl If INTLSYMBOL is 'external', only cases 2 and 3 can occur. 
dnl The use of .gmo is historical (it was needed to avoid overwriting the dnl GNU format catalogs when building on a platform with an X/Open gettext), dnl but we keep it in order not to force irrelevant filename changes on the dnl maintainers. dnl AC_DEFUN([AM_GNU_GETTEXT], [ dnl Argument checking. ifelse([$1], [], , [ifelse([$1], [external], , [ifelse([$1], [use-libtool], , [errprint([ERROR: invalid first argument to AM_GNU_GETTEXT ])])])]) ifelse(ifelse([$1], [], [old])[]ifelse([$1], [no-libtool], [old]), [old], [errprint([ERROR: Use of AM_GNU_GETTEXT without [external] argument is no longer supported. ])]) ifelse([$2], [], , [ifelse([$2], [need-ngettext], , [ifelse([$2], [need-formatstring-macros], , [errprint([ERROR: invalid second argument to AM_GNU_GETTEXT ])])])]) define([gt_included_intl], ifelse([$1], [external], [no], [yes])) gt_NEEDS_INIT AM_GNU_GETTEXT_NEED([$2]) AC_REQUIRE([AM_PO_SUBDIRS])dnl ifelse(gt_included_intl, yes, [ AC_REQUIRE([AM_INTL_SUBDIR])dnl ]) dnl Prerequisites of AC_LIB_LINKFLAGS_BODY. AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) AC_REQUIRE([AC_LIB_RPATH]) dnl Sometimes libintl requires libiconv, so first search for libiconv. dnl Ideally we would do this search only after the dnl if test "$USE_NLS" = "yes"; then dnl if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" != "yes"; }; then dnl tests. But if configure.in invokes AM_ICONV after AM_GNU_GETTEXT dnl the configure script would need to contain the same shell code dnl again, outside any 'if'. There are two solutions: dnl - Invoke AM_ICONV_LINKFLAGS_BODY here, outside any 'if'. dnl - Control the expansions in more detail using AC_PROVIDE_IFELSE. dnl Since AC_PROVIDE_IFELSE is not documented, we avoid it. ifelse(gt_included_intl, yes, , [ AC_REQUIRE([AM_ICONV_LINKFLAGS_BODY]) ]) dnl Sometimes, on Mac OS X, libintl requires linking with CoreFoundation. gt_INTL_MACOSX dnl Set USE_NLS. AC_REQUIRE([AM_NLS]) ifelse(gt_included_intl, yes, [ BUILD_INCLUDED_LIBINTL=no USE_INCLUDED_LIBINTL=no ]) LIBINTL= LTLIBINTL= POSUB= dnl Add a version number to the cache macros. case " $gt_needs " in *" need-formatstring-macros "*) gt_api_version=3 ;; *" need-ngettext "*) gt_api_version=2 ;; *) gt_api_version=1 ;; esac gt_func_gnugettext_libc="gt_cv_func_gnugettext${gt_api_version}_libc" gt_func_gnugettext_libintl="gt_cv_func_gnugettext${gt_api_version}_libintl" dnl If we use NLS figure out what method if test "$USE_NLS" = "yes"; then gt_use_preinstalled_gnugettext=no ifelse(gt_included_intl, yes, [ AC_MSG_CHECKING([whether included gettext is requested]) AC_ARG_WITH([included-gettext], [ --with-included-gettext use the GNU gettext library included here], nls_cv_force_use_gnu_gettext=$withval, nls_cv_force_use_gnu_gettext=no) AC_MSG_RESULT([$nls_cv_force_use_gnu_gettext]) nls_cv_use_gnu_gettext="$nls_cv_force_use_gnu_gettext" if test "$nls_cv_force_use_gnu_gettext" != "yes"; then ]) dnl User does not insist on using GNU NLS library. Figure out what dnl to use. If GNU gettext is available we use this. Else we have dnl to fall back to GNU NLS library. if test $gt_api_version -ge 3; then gt_revision_test_code=' #ifndef __GNU_GETTEXT_SUPPORTED_REVISION #define __GNU_GETTEXT_SUPPORTED_REVISION(major) ((major) == 0 ? 
0 : -1) #endif changequote(,)dnl typedef int array [2 * (__GNU_GETTEXT_SUPPORTED_REVISION(0) >= 1) - 1]; changequote([,])dnl ' else gt_revision_test_code= fi if test $gt_api_version -ge 2; then gt_expression_test_code=' + * ngettext ("", "", 0)' else gt_expression_test_code= fi AC_CACHE_CHECK([for GNU gettext in libc], [$gt_func_gnugettext_libc], [AC_LINK_IFELSE( [AC_LANG_PROGRAM( [[ #include #ifndef __GNU_GETTEXT_SUPPORTED_REVISION extern int _nl_msg_cat_cntr; extern int *_nl_domain_bindings; #define __GNU_GETTEXT_SYMBOL_EXPRESSION (_nl_msg_cat_cntr + *_nl_domain_bindings) #else #define __GNU_GETTEXT_SYMBOL_EXPRESSION 0 #endif $gt_revision_test_code ]], [[ bindtextdomain ("", ""); return * gettext ("")$gt_expression_test_code + __GNU_GETTEXT_SYMBOL_EXPRESSION ]])], [eval "$gt_func_gnugettext_libc=yes"], [eval "$gt_func_gnugettext_libc=no"])]) if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" != "yes"; }; then dnl Sometimes libintl requires libiconv, so first search for libiconv. ifelse(gt_included_intl, yes, , [ AM_ICONV_LINK ]) dnl Search for libintl and define LIBINTL, LTLIBINTL and INCINTL dnl accordingly. Don't use AC_LIB_LINKFLAGS_BODY([intl],[iconv]) dnl because that would add "-liconv" to LIBINTL and LTLIBINTL dnl even if libiconv doesn't exist. AC_LIB_LINKFLAGS_BODY([intl]) AC_CACHE_CHECK([for GNU gettext in libintl], [$gt_func_gnugettext_libintl], [gt_save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS $INCINTL" gt_save_LIBS="$LIBS" LIBS="$LIBS $LIBINTL" dnl Now see whether libintl exists and does not depend on libiconv. AC_LINK_IFELSE( [AC_LANG_PROGRAM( [[ #include #ifndef __GNU_GETTEXT_SUPPORTED_REVISION extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias (const char *); #define __GNU_GETTEXT_SYMBOL_EXPRESSION (_nl_msg_cat_cntr + *_nl_expand_alias ("")) #else #define __GNU_GETTEXT_SYMBOL_EXPRESSION 0 #endif $gt_revision_test_code ]], [[ bindtextdomain ("", ""); return * gettext ("")$gt_expression_test_code + __GNU_GETTEXT_SYMBOL_EXPRESSION ]])], [eval "$gt_func_gnugettext_libintl=yes"], [eval "$gt_func_gnugettext_libintl=no"]) dnl Now see whether libintl exists and depends on libiconv. if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" != yes; } && test -n "$LIBICONV"; then LIBS="$LIBS $LIBICONV" AC_LINK_IFELSE( [AC_LANG_PROGRAM( [[ #include #ifndef __GNU_GETTEXT_SUPPORTED_REVISION extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias (const char *); #define __GNU_GETTEXT_SYMBOL_EXPRESSION (_nl_msg_cat_cntr + *_nl_expand_alias ("")) #else #define __GNU_GETTEXT_SYMBOL_EXPRESSION 0 #endif $gt_revision_test_code ]], [[ bindtextdomain ("", ""); return * gettext ("")$gt_expression_test_code + __GNU_GETTEXT_SYMBOL_EXPRESSION ]])], [LIBINTL="$LIBINTL $LIBICONV" LTLIBINTL="$LTLIBINTL $LTLIBICONV" eval "$gt_func_gnugettext_libintl=yes" ]) fi CPPFLAGS="$gt_save_CPPFLAGS" LIBS="$gt_save_LIBS"]) fi dnl If an already present or preinstalled GNU gettext() is found, dnl use it. But if this macro is used in GNU gettext, and GNU dnl gettext is already preinstalled in libintl, we update this dnl libintl. (Cf. the install rule in intl/Makefile.in.) if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" = "yes"; } \ || { { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; } \ && test "$PACKAGE" != gettext-runtime \ && test "$PACKAGE" != gettext-tools; }; then gt_use_preinstalled_gnugettext=yes else dnl Reset the values set by searching for libintl. 
LIBINTL= LTLIBINTL= INCINTL= fi ifelse(gt_included_intl, yes, [ if test "$gt_use_preinstalled_gnugettext" != "yes"; then dnl GNU gettext is not found in the C library. dnl Fall back on included GNU gettext library. nls_cv_use_gnu_gettext=yes fi fi if test "$nls_cv_use_gnu_gettext" = "yes"; then dnl Mark actions used to generate GNU NLS library. BUILD_INCLUDED_LIBINTL=yes USE_INCLUDED_LIBINTL=yes LIBINTL="ifelse([$3],[],\${top_builddir}/intl,[$3])/libintl.la $LIBICONV $LIBTHREAD" LTLIBINTL="ifelse([$3],[],\${top_builddir}/intl,[$3])/libintl.la $LTLIBICONV $LTLIBTHREAD" LIBS=`echo " $LIBS " | sed -e 's/ -lintl / /' -e 's/^ //' -e 's/ $//'` fi CATOBJEXT= if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then dnl Mark actions to use GNU gettext tools. CATOBJEXT=.gmo fi ]) if test -n "$INTL_MACOSX_LIBS"; then if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then dnl Some extra flags are needed during linking. LIBINTL="$LIBINTL $INTL_MACOSX_LIBS" LTLIBINTL="$LTLIBINTL $INTL_MACOSX_LIBS" fi fi if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then AC_DEFINE([ENABLE_NLS], [1], [Define to 1 if translation of program messages to the user's native language is requested.]) else USE_NLS=no fi fi AC_MSG_CHECKING([whether to use NLS]) AC_MSG_RESULT([$USE_NLS]) if test "$USE_NLS" = "yes"; then AC_MSG_CHECKING([where the gettext function comes from]) if test "$gt_use_preinstalled_gnugettext" = "yes"; then if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then gt_source="external libintl" else gt_source="libc" fi else gt_source="included intl directory" fi AC_MSG_RESULT([$gt_source]) fi if test "$USE_NLS" = "yes"; then if test "$gt_use_preinstalled_gnugettext" = "yes"; then if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then AC_MSG_CHECKING([how to link with libintl]) AC_MSG_RESULT([$LIBINTL]) AC_LIB_APPENDTOVAR([CPPFLAGS], [$INCINTL]) fi dnl For backward compatibility. Some packages may be using this. AC_DEFINE([HAVE_GETTEXT], [1], [Define if the GNU gettext() function is already present or preinstalled.]) AC_DEFINE([HAVE_DCGETTEXT], [1], [Define if the GNU dcgettext() function is already present or preinstalled.]) fi dnl We need to process the po/ directory. POSUB=po fi ifelse(gt_included_intl, yes, [ dnl In GNU gettext we have to set BUILD_INCLUDED_LIBINTL to 'yes' dnl because some of the testsuite requires it. BUILD_INCLUDED_LIBINTL=yes dnl Make all variables we use known to autoconf. AC_SUBST([BUILD_INCLUDED_LIBINTL]) AC_SUBST([USE_INCLUDED_LIBINTL]) AC_SUBST([CATOBJEXT]) ]) dnl For backward compatibility. Some Makefiles may be using this. INTLLIBS="$LIBINTL" AC_SUBST([INTLLIBS]) dnl Make all documented variables known to autoconf. AC_SUBST([LIBINTL]) AC_SUBST([LTLIBINTL]) AC_SUBST([POSUB]) ]) dnl gt_NEEDS_INIT ensures that the gt_needs variable is initialized. m4_define([gt_NEEDS_INIT], [ m4_divert_text([DEFAULTS], [gt_needs=]) m4_define([gt_NEEDS_INIT], []) ]) dnl Usage: AM_GNU_GETTEXT_NEED([NEEDSYMBOL]) AC_DEFUN([AM_GNU_GETTEXT_NEED], [ m4_divert_text([INIT_PREPARE], [gt_needs="$gt_needs $1"]) ]) dnl Usage: AM_GNU_GETTEXT_VERSION([gettext-version]) AC_DEFUN([AM_GNU_GETTEXT_VERSION], []) dnl Usage: AM_GNU_GETTEXT_REQUIRE_VERSION([gettext-version]) AC_DEFUN([AM_GNU_GETTEXT_REQUIRE_VERSION], []) # host-cpu-c-abi.m4 serial 13 dnl Copyright (C) 2002-2020 Free Software Foundation, Inc. 
dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl From Bruno Haible and Sam Steingold. dnl Sets the HOST_CPU variable to the canonical name of the CPU. dnl Sets the HOST_CPU_C_ABI variable to the canonical name of the CPU with its dnl C language ABI (application binary interface). dnl Also defines __${HOST_CPU}__ and __${HOST_CPU_C_ABI}__ as C macros in dnl config.h. dnl dnl This canonical name can be used to select a particular assembly language dnl source file that will interoperate with C code on the given host. dnl dnl For example: dnl * 'i386' and 'sparc' are different canonical names, because code for i386 dnl will not run on SPARC CPUs and vice versa. They have different dnl instruction sets. dnl * 'sparc' and 'sparc64' are different canonical names, because code for dnl 'sparc' and code for 'sparc64' cannot be linked together: 'sparc' code dnl contains 32-bit instructions, whereas 'sparc64' code contains 64-bit dnl instructions. A process on a SPARC CPU can be in 32-bit mode or in 64-bit dnl mode, but not both. dnl * 'mips' and 'mipsn32' are different canonical names, because they use dnl different argument passing and return conventions for C functions, and dnl although the instruction set of 'mips' is a large subset of the dnl instruction set of 'mipsn32'. dnl * 'mipsn32' and 'mips64' are different canonical names, because they use dnl different sizes for the C types like 'int' and 'void *', and although dnl the instruction sets of 'mipsn32' and 'mips64' are the same. dnl * The same canonical name is used for different endiannesses. You can dnl determine the endianness through preprocessor symbols: dnl - 'arm': test __ARMEL__. dnl - 'mips', 'mipsn32', 'mips64': test _MIPSEB vs. _MIPSEL. dnl - 'powerpc64': test _BIG_ENDIAN vs. _LITTLE_ENDIAN. dnl * The same name 'i386' is used for CPUs of type i386, i486, i586 dnl (Pentium), AMD K7, Pentium II, Pentium IV, etc., because dnl - Instructions that do not exist on all of these CPUs (cmpxchg, dnl MMX, SSE, SSE2, 3DNow! etc.) are not frequently used. If your dnl assembly language source files use such instructions, you will dnl need to make the distinction. dnl - Speed of execution of the common instruction set is reasonable across dnl the entire family of CPUs. If you have assembly language source files dnl that are optimized for particular CPU types (like GNU gmp has), you dnl will need to make the distinction. dnl See . AC_DEFUN([gl_HOST_CPU_C_ABI], [ AC_REQUIRE([AC_CANONICAL_HOST]) AC_REQUIRE([gl_C_ASM]) AC_CACHE_CHECK([host CPU and C ABI], [gl_cv_host_cpu_c_abi], [case "$host_cpu" in changequote(,)dnl i[34567]86 ) changequote([,])dnl gl_cv_host_cpu_c_abi=i386 ;; x86_64 ) # On x86_64 systems, the C compiler may be generating code in one of # these ABIs: # - 64-bit instruction set, 64-bit pointers, 64-bit 'long': x86_64. # - 64-bit instruction set, 64-bit pointers, 32-bit 'long': x86_64 # with native Windows (mingw, MSVC). # - 64-bit instruction set, 32-bit pointers, 32-bit 'long': x86_64-x32. # - 32-bit instruction set, 32-bit pointers, 32-bit 'long': i386. 
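The x86_64 branch that follows settles the ABI entirely with compile probes against predefined macros. Run outside autoconf, the same decision looks like this sketch, where cc stands in for the configured compiler:

# Probe 1: does the compiler target the 64-bit x86 instruction set at all?
cat > conftest.c <<'EOF'
#if !(defined __x86_64__ || defined __amd64__ || defined _M_X64 || defined _M_AMD64)
#error "not x86-64"
#endif
int ok;
EOF
if cc -c -o conftest.o conftest.c 2>/dev/null; then
  # Probe 2: 32-bit pointers on the 64-bit instruction set means the x32 ABI.
  cat > conftest.c <<'EOF'
#if !(defined __ILP32__ || defined _ILP32)
#error "not ILP32"
#endif
int ok;
EOF
  if cc -c -o conftest.o conftest.c 2>/dev/null; then
    abi=x86_64-x32
  else
    abi=x86_64
  fi
else
  abi=i386
fi
echo "host CPU C ABI: $abi"
rm -f conftest.c conftest.o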
AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#if (defined __x86_64__ || defined __amd64__ \ || defined _M_X64 || defined _M_AMD64) int ok; #else error fail #endif ]])], [AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#if defined __ILP32__ || defined _ILP32 int ok; #else error fail #endif ]])], [gl_cv_host_cpu_c_abi=x86_64-x32], [gl_cv_host_cpu_c_abi=x86_64])], [gl_cv_host_cpu_c_abi=i386]) ;; changequote(,)dnl alphaev[4-8] | alphaev56 | alphapca5[67] | alphaev6[78] ) changequote([,])dnl gl_cv_host_cpu_c_abi=alpha ;; arm* | aarch64 ) # Assume arm with EABI. # On arm64 systems, the C compiler may be generating code in one of # these ABIs: # - aarch64 instruction set, 64-bit pointers, 64-bit 'long': arm64. # - aarch64 instruction set, 32-bit pointers, 32-bit 'long': arm64-ilp32. # - 32-bit instruction set, 32-bit pointers, 32-bit 'long': arm or armhf. AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#ifdef __aarch64__ int ok; #else error fail #endif ]])], [AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#if defined __ILP32__ || defined _ILP32 int ok; #else error fail #endif ]])], [gl_cv_host_cpu_c_abi=arm64-ilp32], [gl_cv_host_cpu_c_abi=arm64])], [# Don't distinguish little-endian and big-endian arm, since they # don't require different machine code for simple operations and # since the user can distinguish them through the preprocessor # defines __ARMEL__ vs. __ARMEB__. # But distinguish arm which passes floating-point arguments and # return values in integer registers (r0, r1, ...) - this is # gcc -mfloat-abi=soft or gcc -mfloat-abi=softfp - from arm which # passes them in float registers (s0, s1, ...) and double registers # (d0, d1, ...) - this is gcc -mfloat-abi=hard. GCC 4.6 or newer # sets the preprocessor defines __ARM_PCS (for the first case) and # __ARM_PCS_VFP (for the second case), but older GCC does not. echo 'double ddd; void func (double dd) { ddd = dd; }' > conftest.c # Look for a reference to the register d0 in the .s file. AC_TRY_COMMAND(${CC-cc} $CFLAGS $CPPFLAGS $gl_c_asm_opt conftest.c) >/dev/null 2>&1 if LC_ALL=C grep 'd0,' conftest.$gl_asmext >/dev/null; then gl_cv_host_cpu_c_abi=armhf else gl_cv_host_cpu_c_abi=arm fi rm -f conftest* ]) ;; hppa1.0 | hppa1.1 | hppa2.0* | hppa64 ) # On hppa, the C compiler may be generating 32-bit code or 64-bit # code. In the latter case, it defines _LP64 and __LP64__. AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#ifdef __LP64__ int ok; #else error fail #endif ]])], [gl_cv_host_cpu_c_abi=hppa64], [gl_cv_host_cpu_c_abi=hppa]) ;; ia64* ) # On ia64 on HP-UX, the C compiler may be generating 64-bit code or # 32-bit code. In the latter case, it defines _ILP32. AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#ifdef _ILP32 int ok; #else error fail #endif ]])], [gl_cv_host_cpu_c_abi=ia64-ilp32], [gl_cv_host_cpu_c_abi=ia64]) ;; mips* ) # We should also check for (_MIPS_SZPTR == 64), but gcc keeps this # at 32. AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#if defined _MIPS_SZLONG && (_MIPS_SZLONG == 64) int ok; #else error fail #endif ]])], [gl_cv_host_cpu_c_abi=mips64], [# In the n32 ABI, _ABIN32 is defined, _ABIO32 is not defined (but # may later get defined by ), and _MIPS_SIM == _ABIN32. # In the 32 ABI, _ABIO32 is defined, _ABIN32 is not defined (but # may later get defined by ), and _MIPS_SIM == _ABIO32. AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#if (_MIPS_SIM == _ABIN32) int ok; #else error fail #endif ]])], [gl_cv_host_cpu_c_abi=mipsn32], [gl_cv_host_cpu_c_abi=mips])]) ;; powerpc* ) # Different ABIs are in use on AIX vs. Mac OS X vs. Linux,*BSD. 
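The arm branch above cannot rely on a dedicated preprocessor macro with pre-4.6 GCC, so it compiles a function that takes a double and looks for the VFP register d0 in the generated assembly; only the hard-float ABI passes floating-point arguments there. A by-hand version of that probe, with illustrative file names and -S standing in for the detected assembly-output option:

echo 'double ddd; void func (double dd) { ddd = dd; }' > conftest.c
cc -S -o conftest.s conftest.c

if LC_ALL=C grep 'd0,' conftest.s >/dev/null; then
  echo "armhf: doubles passed in VFP registers (d0, d1, ...)"
else
  echo "arm: doubles passed in integer registers (soft-float calling convention)"
fi
rm -f conftest.c conftest.s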
# No need to distinguish them here; the caller may distinguish # them based on the OS. # On powerpc64 systems, the C compiler may still be generating # 32-bit code. And on powerpc-ibm-aix systems, the C compiler may # be generating 64-bit code. AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#if defined __powerpc64__ || defined _ARCH_PPC64 int ok; #else error fail #endif ]])], [# On powerpc64, there are two ABIs on Linux: The AIX compatible # one and the ELFv2 one. The latter defines _CALL_ELF=2. AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#if defined _CALL_ELF && _CALL_ELF == 2 int ok; #else error fail #endif ]])], [gl_cv_host_cpu_c_abi=powerpc64-elfv2], [gl_cv_host_cpu_c_abi=powerpc64]) ], [gl_cv_host_cpu_c_abi=powerpc]) ;; rs6000 ) gl_cv_host_cpu_c_abi=powerpc ;; riscv32 | riscv64 ) # There are 2 architectures (with variants): rv32* and rv64*. AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#if __riscv_xlen == 64 int ok; #else error fail #endif ]])], [cpu=riscv64], [cpu=riscv32]) # There are 6 ABIs: ilp32, ilp32f, ilp32d, lp64, lp64f, lp64d. # Size of 'long' and 'void *': AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#if defined __LP64__ int ok; #else error fail #endif ]])], [main_abi=lp64], [main_abi=ilp32]) # Float ABIs: # __riscv_float_abi_double: # 'float' and 'double' are passed in floating-point registers. # __riscv_float_abi_single: # 'float' are passed in floating-point registers. # __riscv_float_abi_soft: # No values are passed in floating-point registers. AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#if defined __riscv_float_abi_double int ok; #else error fail #endif ]])], [float_abi=d], [AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#if defined __riscv_float_abi_single int ok; #else error fail #endif ]])], [float_abi=f], [float_abi='']) ]) gl_cv_host_cpu_c_abi="${cpu}-${main_abi}${float_abi}" ;; s390* ) # On s390x, the C compiler may be generating 64-bit (= s390x) code # or 31-bit (= s390) code. AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#if defined __LP64__ || defined __s390x__ int ok; #else error fail #endif ]])], [gl_cv_host_cpu_c_abi=s390x], [gl_cv_host_cpu_c_abi=s390]) ;; sparc | sparc64 ) # UltraSPARCs running Linux have `uname -m` = "sparc64", but the # C compiler still generates 32-bit code. AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#if defined __sparcv9 || defined __arch64__ int ok; #else error fail #endif ]])], [gl_cv_host_cpu_c_abi=sparc64], [gl_cv_host_cpu_c_abi=sparc]) ;; *) gl_cv_host_cpu_c_abi="$host_cpu" ;; esac ]) dnl In most cases, $HOST_CPU and $HOST_CPU_C_ABI are the same. HOST_CPU=`echo "$gl_cv_host_cpu_c_abi" | sed -e 's/-.*//'` HOST_CPU_C_ABI="$gl_cv_host_cpu_c_abi" AC_SUBST([HOST_CPU]) AC_SUBST([HOST_CPU_C_ABI]) # This was # AC_DEFINE_UNQUOTED([__${HOST_CPU}__]) # AC_DEFINE_UNQUOTED([__${HOST_CPU_C_ABI}__]) # earlier, but KAI C++ 3.2d doesn't like this. sed -e 's/-/_/g' >> confdefs.h < #include ]], [[iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd);]])], [am_cv_func_iconv=yes]) if test "$am_cv_func_iconv" != yes; then am_save_LIBS="$LIBS" LIBS="$LIBS $LIBICONV" AC_LINK_IFELSE( [AC_LANG_PROGRAM( [[ #include #include ]], [[iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd);]])], [am_cv_lib_iconv=yes] [am_cv_func_iconv=yes]) LIBS="$am_save_LIBS" fi ]) if test "$am_cv_func_iconv" = yes; then AC_CACHE_CHECK([for working iconv], [am_cv_func_iconv_works], [ dnl This tests against bugs in AIX 5.1, AIX 6.1..7.1, HP-UX 11.11, dnl Solaris 10. 
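For orientation, the AM_ICONV_LINK logic above reduces to two link attempts, first against libc alone and then with the library located by AC_LIB_LINKFLAGS_BODY (typically -liconv); only when one of those succeeds do the runtime checks that follow get a chance to reject an implementation that links but converts incorrectly. A stand-alone approximation of the link half:

cat > conftest.c <<'EOF'
#include <stdlib.h>
#include <iconv.h>
int main (void)
{
  iconv_t cd = iconv_open ("", "");
  iconv (cd, NULL, NULL, NULL, NULL);
  iconv_close (cd);
  return 0;
}
EOF

if cc -o conftest conftest.c 2>/dev/null; then
  echo "iconv available in libc"
elif cc -o conftest conftest.c -liconv 2>/dev/null; then
  echo "iconv available via -liconv"
else
  echo "no usable iconv"
fi
rm -f conftest conftest.c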
am_save_LIBS="$LIBS" if test $am_cv_lib_iconv = yes; then LIBS="$LIBS $LIBICONV" fi am_cv_func_iconv_works=no for ac_iconv_const in '' 'const'; do AC_RUN_IFELSE( [AC_LANG_PROGRAM( [[ #include #include #ifndef ICONV_CONST # define ICONV_CONST $ac_iconv_const #endif ]], [[int result = 0; /* Test against AIX 5.1 bug: Failures are not distinguishable from successful returns. */ { iconv_t cd_utf8_to_88591 = iconv_open ("ISO8859-1", "UTF-8"); if (cd_utf8_to_88591 != (iconv_t)(-1)) { static ICONV_CONST char input[] = "\342\202\254"; /* EURO SIGN */ char buf[10]; ICONV_CONST char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_utf8_to_88591, &inptr, &inbytesleft, &outptr, &outbytesleft); if (res == 0) result |= 1; iconv_close (cd_utf8_to_88591); } } /* Test against Solaris 10 bug: Failures are not distinguishable from successful returns. */ { iconv_t cd_ascii_to_88591 = iconv_open ("ISO8859-1", "646"); if (cd_ascii_to_88591 != (iconv_t)(-1)) { static ICONV_CONST char input[] = "\263"; char buf[10]; ICONV_CONST char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_ascii_to_88591, &inptr, &inbytesleft, &outptr, &outbytesleft); if (res == 0) result |= 2; iconv_close (cd_ascii_to_88591); } } /* Test against AIX 6.1..7.1 bug: Buffer overrun. */ { iconv_t cd_88591_to_utf8 = iconv_open ("UTF-8", "ISO-8859-1"); if (cd_88591_to_utf8 != (iconv_t)(-1)) { static ICONV_CONST char input[] = "\304"; static char buf[2] = { (char)0xDE, (char)0xAD }; ICONV_CONST char *inptr = input; size_t inbytesleft = 1; char *outptr = buf; size_t outbytesleft = 1; size_t res = iconv (cd_88591_to_utf8, &inptr, &inbytesleft, &outptr, &outbytesleft); if (res != (size_t)(-1) || outptr - buf > 1 || buf[1] != (char)0xAD) result |= 4; iconv_close (cd_88591_to_utf8); } } #if 0 /* This bug could be worked around by the caller. */ /* Test against HP-UX 11.11 bug: Positive return value instead of 0. */ { iconv_t cd_88591_to_utf8 = iconv_open ("utf8", "iso88591"); if (cd_88591_to_utf8 != (iconv_t)(-1)) { static ICONV_CONST char input[] = "\304rger mit b\366sen B\374bchen ohne Augenma\337"; char buf[50]; ICONV_CONST char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_88591_to_utf8, &inptr, &inbytesleft, &outptr, &outbytesleft); if ((int)res > 0) result |= 8; iconv_close (cd_88591_to_utf8); } } #endif /* Test against HP-UX 11.11 bug: No converter from EUC-JP to UTF-8 is provided. */ { /* Try standardized names. */ iconv_t cd1 = iconv_open ("UTF-8", "EUC-JP"); /* Try IRIX, OSF/1 names. */ iconv_t cd2 = iconv_open ("UTF-8", "eucJP"); /* Try AIX names. */ iconv_t cd3 = iconv_open ("UTF-8", "IBM-eucJP"); /* Try HP-UX names. 
*/ iconv_t cd4 = iconv_open ("utf8", "eucJP"); if (cd1 == (iconv_t)(-1) && cd2 == (iconv_t)(-1) && cd3 == (iconv_t)(-1) && cd4 == (iconv_t)(-1)) result |= 16; if (cd1 != (iconv_t)(-1)) iconv_close (cd1); if (cd2 != (iconv_t)(-1)) iconv_close (cd2); if (cd3 != (iconv_t)(-1)) iconv_close (cd3); if (cd4 != (iconv_t)(-1)) iconv_close (cd4); } return result; ]])], [am_cv_func_iconv_works=yes], , [case "$host_os" in aix* | hpux*) am_cv_func_iconv_works="guessing no" ;; *) am_cv_func_iconv_works="guessing yes" ;; esac]) test "$am_cv_func_iconv_works" = no || break done LIBS="$am_save_LIBS" ]) case "$am_cv_func_iconv_works" in *no) am_func_iconv=no am_cv_lib_iconv=no ;; *) am_func_iconv=yes ;; esac else am_func_iconv=no am_cv_lib_iconv=no fi if test "$am_func_iconv" = yes; then AC_DEFINE([HAVE_ICONV], [1], [Define if you have the iconv() function and it works.]) fi if test "$am_cv_lib_iconv" = yes; then AC_MSG_CHECKING([how to link with libiconv]) AC_MSG_RESULT([$LIBICONV]) else dnl If $LIBICONV didn't lead to a usable library, we don't need $INCICONV dnl either. CPPFLAGS="$am_save_CPPFLAGS" LIBICONV= LTLIBICONV= fi AC_SUBST([LIBICONV]) AC_SUBST([LTLIBICONV]) ]) dnl Define AM_ICONV using AC_DEFUN_ONCE for Autoconf >= 2.64, in order to dnl avoid warnings like dnl "warning: AC_REQUIRE: `AM_ICONV' was expanded before it was required". dnl This is tricky because of the way 'aclocal' is implemented: dnl - It requires defining an auxiliary macro whose name ends in AC_DEFUN. dnl Otherwise aclocal's initial scan pass would miss the macro definition. dnl - It requires a line break inside the AC_DEFUN_ONCE and AC_DEFUN expansions. dnl Otherwise aclocal would emit many "Use of uninitialized value $1" dnl warnings. m4_define([gl_iconv_AC_DEFUN], m4_version_prereq([2.64], [[AC_DEFUN_ONCE( [$1], [$2])]], [m4_ifdef([gl_00GNULIB], [[AC_DEFUN_ONCE( [$1], [$2])]], [[AC_DEFUN( [$1], [$2])]])])) gl_iconv_AC_DEFUN([AM_ICONV], [ AM_ICONV_LINK if test "$am_cv_func_iconv" = yes; then AC_MSG_CHECKING([for iconv declaration]) AC_CACHE_VAL([am_cv_proto_iconv], [ AC_COMPILE_IFELSE( [AC_LANG_PROGRAM( [[ #include #include extern #ifdef __cplusplus "C" #endif #if defined(__STDC__) || defined(_MSC_VER) || defined(__cplusplus) size_t iconv (iconv_t cd, char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft); #else size_t iconv(); #endif ]], [[]])], [am_cv_proto_iconv_arg1=""], [am_cv_proto_iconv_arg1="const"]) am_cv_proto_iconv="extern size_t iconv (iconv_t cd, $am_cv_proto_iconv_arg1 char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);"]) am_cv_proto_iconv=`echo "[$]am_cv_proto_iconv" | tr -s ' ' | sed -e 's/( /(/'` AC_MSG_RESULT([ $am_cv_proto_iconv]) else dnl When compiling GNU libiconv on a system that does not have iconv yet, dnl pick the POSIX compliant declaration without 'const'. am_cv_proto_iconv_arg1="" fi AC_DEFINE_UNQUOTED([ICONV_CONST], [$am_cv_proto_iconv_arg1], [Define as const if the declaration of iconv() needs const.]) dnl Also substitute ICONV_CONST in the gnulib generated . m4_ifdef([gl_ICONV_H_DEFAULTS], [AC_REQUIRE([gl_ICONV_H_DEFAULTS]) if test -n "$am_cv_proto_iconv_arg1"; then ICONV_CONST="const" fi ]) ]) # intlmacosx.m4 serial 8 (gettext-0.20.2) dnl Copyright (C) 2004-2014, 2016, 2019-2020 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. 
dnl dnl This file can be used in projects which are not available under dnl the GNU General Public License or the GNU Lesser General Public dnl License but which still want to provide support for the GNU gettext dnl functionality. dnl Please note that the actual code of the GNU gettext library is covered dnl by the GNU Lesser General Public License, and the rest of the GNU dnl gettext package is covered by the GNU General Public License. dnl They are *not* in the public domain. dnl Checks for special options needed on Mac OS X. dnl Defines INTL_MACOSX_LIBS. AC_DEFUN([gt_INTL_MACOSX], [ dnl Check for API introduced in Mac OS X 10.4. AC_CACHE_CHECK([for CFPreferencesCopyAppValue], [gt_cv_func_CFPreferencesCopyAppValue], [gt_save_LIBS="$LIBS" LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation" AC_LINK_IFELSE( [AC_LANG_PROGRAM( [[#include ]], [[CFPreferencesCopyAppValue(NULL, NULL)]])], [gt_cv_func_CFPreferencesCopyAppValue=yes], [gt_cv_func_CFPreferencesCopyAppValue=no]) LIBS="$gt_save_LIBS"]) if test $gt_cv_func_CFPreferencesCopyAppValue = yes; then AC_DEFINE([HAVE_CFPREFERENCESCOPYAPPVALUE], [1], [Define to 1 if you have the Mac OS X function CFPreferencesCopyAppValue in the CoreFoundation framework.]) fi dnl Don't check for the API introduced in Mac OS X 10.5, CFLocaleCopyCurrent, dnl because in macOS 10.13.4 it has the following behaviour: dnl When two or more languages are specified in the dnl "System Preferences > Language & Region > Preferred Languages" panel, dnl it returns en_CC where CC is the territory (even when English is not among dnl the preferred languages!). What we want instead is what dnl CFLocaleCopyCurrent returned in earlier macOS releases and what dnl CFPreferencesCopyAppValue still returns, namely ll_CC where ll is the dnl first among the preferred languages and CC is the territory. AC_CACHE_CHECK([for CFLocaleCopyPreferredLanguages], [gt_cv_func_CFLocaleCopyPreferredLanguages], [gt_save_LIBS="$LIBS" LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation" AC_LINK_IFELSE( [AC_LANG_PROGRAM( [[#include ]], [[CFLocaleCopyPreferredLanguages();]])], [gt_cv_func_CFLocaleCopyPreferredLanguages=yes], [gt_cv_func_CFLocaleCopyPreferredLanguages=no]) LIBS="$gt_save_LIBS"]) if test $gt_cv_func_CFLocaleCopyPreferredLanguages = yes; then AC_DEFINE([HAVE_CFLOCALECOPYPREFERREDLANGUAGES], [1], [Define to 1 if you have the Mac OS X function CFLocaleCopyPreferredLanguages in the CoreFoundation framework.]) fi INTL_MACOSX_LIBS= if test $gt_cv_func_CFPreferencesCopyAppValue = yes \ || test $gt_cv_func_CFLocaleCopyPreferredLanguages = yes; then INTL_MACOSX_LIBS="-Wl,-framework -Wl,CoreFoundation" fi AC_SUBST([INTL_MACOSX_LIBS]) ]) # lib-ld.m4 serial 9 dnl Copyright (C) 1996-2003, 2009-2020 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl Subroutines of libtool.m4, dnl with replacements s/_*LT_PATH/AC_LIB_PROG/ and s/lt_/acl_/ to avoid dnl collision with libtool.m4. dnl From libtool-2.4. Sets the variable with_gnu_ld to yes or no. AC_DEFUN([AC_LIB_PROG_LD_GNU], [AC_CACHE_CHECK([if the linker ($LD) is GNU ld], [acl_cv_prog_gnu_ld], [# I'd rather use --version here, but apparently some GNU lds only accept -v. 
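The classification that follows is done by parsing the linker's -v banner, because --version is not accepted by every GNU ld release the macro has to cope with. Outside configure, the same distinction is essentially:

case `${LD:-ld} -v 2>&1 </dev/null` in
  *GNU* | *'with BFD'*)
    with_gnu_ld=yes ;;   # GNU ld (or a BFD-based linker reporting as such)
  *)
    with_gnu_ld=no ;;    # vendor linker: different soname/rpath flags apply later
esac
echo "GNU ld: $with_gnu_ld"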
case `$LD -v 2>&1 /dev/null 2>&1 \ && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ || PATH_SEPARATOR=';' } fi if test -n "$LD"; then AC_MSG_CHECKING([for ld]) elif test "$GCC" = yes; then AC_MSG_CHECKING([for ld used by $CC]) elif test "$with_gnu_ld" = yes; then AC_MSG_CHECKING([for GNU ld]) else AC_MSG_CHECKING([for non-GNU ld]) fi if test -n "$LD"; then # Let the user override the test with a path. : else AC_CACHE_VAL([acl_cv_path_LD], [ acl_cv_path_LD= # Final result of this test ac_prog=ld # Program to search in $PATH if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw acl_output=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) acl_output=`($CC -print-prog-name=ld) 2>&5` ;; esac case $acl_output in # Accept absolute paths. [[\\/]]* | ?:[[\\/]]*) re_direlt='/[[^/]][[^/]]*/\.\./' # Canonicalize the pathname of ld acl_output=`echo "$acl_output" | sed 's%\\\\%/%g'` while echo "$acl_output" | grep "$re_direlt" > /dev/null 2>&1; do acl_output=`echo $acl_output | sed "s%$re_direlt%/%"` done # Got the pathname. No search in PATH is needed. acl_cv_path_LD="$acl_output" ac_prog= ;; "") # If it fails, then pretend we aren't using GCC. ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac fi if test -n "$ac_prog"; then # Search for $ac_prog in $PATH. acl_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$acl_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then acl_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$acl_cv_path_LD" -v 2>&1 conftest.sh . ./conftest.sh rm -f ./conftest.sh acl_cv_rpath=done ]) wl="$acl_cv_wl" acl_libext="$acl_cv_libext" acl_shlibext="$acl_cv_shlibext" acl_libname_spec="$acl_cv_libname_spec" acl_library_names_spec="$acl_cv_library_names_spec" acl_hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec" acl_hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator" acl_hardcode_direct="$acl_cv_hardcode_direct" acl_hardcode_minus_L="$acl_cv_hardcode_minus_L" dnl Determine whether the user wants rpath handling at all. AC_ARG_ENABLE([rpath], [ --disable-rpath do not hardcode runtime library paths], :, enable_rpath=yes) ]) dnl AC_LIB_FROMPACKAGE(name, package) dnl declares that libname comes from the given package. The configure file dnl will then not have a --with-libname-prefix option but a dnl --with-package-prefix option. Several libraries can come from the same dnl package. This declaration must occur before an AC_LIB_LINKFLAGS or similar dnl macro call that searches for libname. AC_DEFUN([AC_LIB_FROMPACKAGE], [ pushdef([NAME],[m4_translit([$1],[abcdefghijklmnopqrstuvwxyz./+-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) define([acl_frompackage_]NAME, [$2]) popdef([NAME]) pushdef([PACK],[$2]) pushdef([PACKUP],[m4_translit(PACK,[abcdefghijklmnopqrstuvwxyz./+-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) define([acl_libsinpackage_]PACKUP, m4_ifdef([acl_libsinpackage_]PACKUP, [m4_defn([acl_libsinpackage_]PACKUP)[, ]],)[lib$1]) popdef([PACKUP]) popdef([PACK]) ]) dnl AC_LIB_LINKFLAGS_BODY(name [, dependencies]) searches for libname and dnl the libraries corresponding to explicit and implicit dependencies. dnl Sets the LIB${NAME}, LTLIB${NAME} and INC${NAME} variables. 
dnl Also, sets the LIB${NAME}_PREFIX variable to nonempty if libname was found dnl in ${LIB${NAME}_PREFIX}/$acl_libdirstem. AC_DEFUN([AC_LIB_LINKFLAGS_BODY], [ AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) pushdef([NAME],[m4_translit([$1],[abcdefghijklmnopqrstuvwxyz./+-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) pushdef([PACK],[m4_ifdef([acl_frompackage_]NAME, [acl_frompackage_]NAME, lib[$1])]) pushdef([PACKUP],[m4_translit(PACK,[abcdefghijklmnopqrstuvwxyz./+-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) pushdef([PACKLIBS],[m4_ifdef([acl_frompackage_]NAME, [acl_libsinpackage_]PACKUP, lib[$1])]) dnl By default, look in $includedir and $libdir. use_additional=yes AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" eval additional_libdir2=\"$exec_prefix/$acl_libdirstem2\" eval additional_libdir3=\"$exec_prefix/$acl_libdirstem3\" ]) AC_ARG_WITH(PACK[-prefix], [[ --with-]]PACK[[-prefix[=DIR] search for ]PACKLIBS[ in DIR/include and DIR/lib --without-]]PACK[[-prefix don't search for ]PACKLIBS[ in includedir and libdir]], [ if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" eval additional_libdir2=\"$exec_prefix/$acl_libdirstem2\" eval additional_libdir3=\"$exec_prefix/$acl_libdirstem3\" ]) else additional_includedir="$withval/include" additional_libdir="$withval/$acl_libdirstem" additional_libdir2="$withval/$acl_libdirstem2" additional_libdir3="$withval/$acl_libdirstem3" fi fi ]) if test "X$additional_libdir2" = "X$additional_libdir"; then additional_libdir2= fi if test "X$additional_libdir3" = "X$additional_libdir"; then additional_libdir3= fi dnl Search the library and its dependencies in $additional_libdir and dnl $LDFLAGS. Using breadth-first-seach. LIB[]NAME= LTLIB[]NAME= INC[]NAME= LIB[]NAME[]_PREFIX= dnl HAVE_LIB${NAME} is an indicator that LIB${NAME}, LTLIB${NAME} have been dnl computed. So it has to be reset here. HAVE_LIB[]NAME= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='$1 $2' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" dnl See if it was already located by an earlier AC_LIB_LINKFLAGS dnl or AC_LIB_HAVE_LINKFLAGS call. uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./+-|ABCDEFGHIJKLMNOPQRSTUVWXYZ____|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$value" else dnl An earlier call to AC_LIB_HAVE_LINKFLAGS has determined dnl that this library doesn't exist. So just drop it. : fi else dnl Search the library lib$name in $additional_libdir and $LDFLAGS dnl and the already constructed $LIBNAME/$LTLIBNAME. 
found_dir= found_la= found_so= found_a= eval libname=\"$acl_libname_spec\" # typically: libname=lib$name if test -n "$acl_shlibext"; then shrext=".$acl_shlibext" # typically: shrext=.so else shrext= fi if test $use_additional = yes; then for additional_libdir_variable in additional_libdir additional_libdir2 additional_libdir3; do if test "X$found_dir" = "X"; then eval dir=\$$additional_libdir_variable if test -n "$dir"; then dnl The same code as in the loop below: dnl First look for a shared library. if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext" && acl_is_expected_elfclass < "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver" && acl_is_expected_elfclass < "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f" && acl_is_expected_elfclass < "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi dnl Then look for a static library. if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext" && ${AR-ar} -p "$dir/$libname.$acl_libext" | acl_is_expected_elfclass; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi fi fi done fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` dnl First look for a shared library. if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext" && acl_is_expected_elfclass < "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver" && acl_is_expected_elfclass < "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f" && acl_is_expected_elfclass < "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi dnl Then look for a static library. if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext" && ${AR-ar} -p "$dir/$libname.$acl_libext" | acl_is_expected_elfclass; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then dnl Found the library. LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then dnl Linking with a shared library. We attempt to hardcode its dnl directory into the executable's runpath, unless it's the dnl standard /usr/lib. 
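"Hardcoding" in this context means embedding the library's directory in the binary's own run-time search path (DT_RPATH or DT_RUNPATH on ELF), so the library is found without an ldconfig entry or LD_LIBRARY_PATH. The exact flag comes from the system-dependent acl_hardcode_libdir_flag_spec, but on typical GNU systems the effect is equivalent to the familiar -rpath; paths in this sketch are illustrative:

# Link against a libbar installed under a non-standard prefix and embed
# that directory in the binary's runpath.
cc -o prog prog.o -L/opt/bar/lib -lbar -Wl,-rpath,/opt/bar/lib

# The embedded path can be inspected afterwards, for example with readelf.
readelf -d prog | grep -iE 'rpath|runpath'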
if test "$enable_rpath" = no \ || test "X$found_dir" = "X/usr/$acl_libdirstem" \ || test "X$found_dir" = "X/usr/$acl_libdirstem2" \ || test "X$found_dir" = "X/usr/$acl_libdirstem3"; then dnl No hardcoding is needed. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else dnl Use an explicit option to hardcode DIR into the resulting dnl binary. dnl Potentially add DIR to ltrpathdirs. dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi dnl The hardcoding into $LIBNAME is system dependent. if test "$acl_hardcode_direct" = yes; then dnl Using DIR/libNAME.so during linking hardcodes DIR into the dnl resulting binary. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then dnl Use an explicit option to hardcode DIR into the resulting dnl binary. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" dnl Potentially add DIR to rpathdirs. dnl The rpathdirs will be appended to $LIBNAME at the end. haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else dnl Rely on "-L$found_dir". dnl But don't add it if it's already contained in the LDFLAGS dnl or the already constructed $LIBNAME haveit= for x in $LDFLAGS $LIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir" fi if test "$acl_hardcode_minus_L" != no; then dnl FIXME: Not sure whether we should use dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" dnl here. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else dnl We cannot use $acl_hardcode_runpath_var and LD_RUN_PATH dnl here, because this doesn't fit in flags passed to the dnl compiler. So give up. No hardcoding. This affects only dnl very old systems. dnl FIXME: Not sure whether we should use dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" dnl here. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then dnl Linking with a static library. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_a" else dnl We shouldn't come here, but anyway it's good to have a dnl fallback. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir -l$name" fi fi dnl Assume the include files are nearby. additional_includedir= case "$found_dir" in */$acl_libdirstem | */$acl_libdirstem/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` if test "$name" = '$1'; then LIB[]NAME[]_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; */$acl_libdirstem2 | */$acl_libdirstem2/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem2/"'*$,,'` if test "$name" = '$1'; then LIB[]NAME[]_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; */$acl_libdirstem3 | */$acl_libdirstem3/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem3/"'*$,,'` if test "$name" = '$1'; then LIB[]NAME[]_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then dnl Potentially add $additional_includedir to $INCNAME. dnl But don't add it dnl 1. if it's the standard /usr/include, dnl 2. if it's /usr/local/include and we are using GCC on Linux, dnl 3. 
if it's already present in $CPPFLAGS or the already dnl constructed $INCNAME, dnl 4. if it doesn't exist as a directory. if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INC[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then dnl Really add $additional_includedir to $INCNAME. INC[]NAME="${INC[]NAME}${INC[]NAME:+ }-I$additional_includedir" fi fi fi fi fi dnl Look for dependencies. if test -n "$found_la"; then dnl Read the .la file. It defines the variables dnl dlname, library_names, old_library, dependency_libs, current, dnl age, revision, installed, dlopen, dlpreopen, libdir. save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . "./$found_la" ;; esac libdir="$save_libdir" dnl We use only dependency_libs. for dep in $dependency_libs; do case "$dep" in -L*) dependency_libdir=`echo "X$dep" | sed -e 's/^X-L//'` dnl Potentially add $dependency_libdir to $LIBNAME and $LTLIBNAME. dnl But don't add it dnl 1. if it's the standard /usr/lib, dnl 2. if it's /usr/local/lib and we are using GCC on Linux, dnl 3. if it's already present in $LDFLAGS or the already dnl constructed $LIBNAME, dnl 4. if it doesn't exist as a directory. if test "X$dependency_libdir" != "X/usr/$acl_libdirstem" \ && test "X$dependency_libdir" != "X/usr/$acl_libdirstem2" \ && test "X$dependency_libdir" != "X/usr/$acl_libdirstem3"; then haveit= if test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem" \ || test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem2" \ || test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem3"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$dependency_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$dependency_libdir"; then dnl Really add $dependency_libdir to $LIBNAME. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$dependency_libdir" fi fi haveit= for x in $LDFLAGS $LTLIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$dependency_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$dependency_libdir"; then dnl Really add $dependency_libdir to $LTLIBNAME. LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$dependency_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then dnl Potentially add DIR to rpathdirs. dnl The rpathdirs will be appended to $LIBNAME at the end. haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi dnl Potentially add DIR to ltrpathdirs. dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) dnl Handle this in the next round. names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) dnl Handle this in the next round. Throw away the .la's dnl directory; it is already contained in a preceding -L dnl option. 
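dnl (Illustrative aside, not part of the upstream macro: a libtool .la file
dnl typically carries a line such as
dnl   dependency_libs=' -L/usr/local/lib /usr/local/lib/libintl.la -lm'
dnl and this loop classifies each element: "-L" and "-R" entries extend the
dnl search and run paths handled above, "-l" and "*.la" entries are queued
dnl for the next breadth-first round, and anything else is passed through
dnl to LIB${NAME}/LTLIB${NAME} unchanged.  The next statement reduces a
dnl "*.la" entry to its bare library name before queueing it.)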
names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) dnl Most likely an immediate library name. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$dep" LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$dep" ;; esac done fi else dnl Didn't find the library; assume it is in the system directories dnl known to the linker and runtime loader. (All the system dnl directories known to the linker should also be known to the dnl runtime loader, otherwise the system is severely misconfigured.) LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n "$acl_hardcode_libdir_separator"; then dnl Weird platform: only the last -rpath option counts, the user must dnl pass all path elements in one option. We can arrange that for a dnl single library, but not when more than one $LIBNAMEs are used. alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$found_dir" done dnl Note: acl_hardcode_libdir_flag_spec uses $libdir and $wl. acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" else dnl The -rpath options are cumulative. for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then dnl When using libtool, the option that works for both libraries and dnl executables is -R. The -R options are cumulative. for found_dir in $ltrpathdirs; do LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-R$found_dir" done fi popdef([PACKLIBS]) popdef([PACKUP]) popdef([PACK]) popdef([NAME]) ]) dnl AC_LIB_APPENDTOVAR(VAR, CONTENTS) appends the elements of CONTENTS to VAR, dnl unless already present in VAR. dnl Works only for CPPFLAGS, not for LIB* variables because that sometimes dnl contains two or three consecutive elements that belong together. AC_DEFUN([AC_LIB_APPENDTOVAR], [ for element in [$2]; do haveit= for x in $[$1]; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then [$1]="${[$1]}${[$1]:+ }$element" fi done ]) dnl For those cases where a variable contains several -L and -l options dnl referring to unknown libraries and directories, this macro determines the dnl necessary additional linker options for the runtime path. dnl AC_LIB_LINKFLAGS_FROM_LIBS([LDADDVAR], [LIBSVALUE], [USE-LIBTOOL]) dnl sets LDADDVAR to linker options needed together with LIBSVALUE. dnl If USE-LIBTOOL evaluates to non-empty, linking with libtool is assumed, dnl otherwise linking without libtool is assumed. AC_DEFUN([AC_LIB_LINKFLAGS_FROM_LIBS], [ AC_REQUIRE([AC_LIB_RPATH]) AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) $1= if test "$enable_rpath" != no; then if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then dnl Use an explicit option to hardcode directories into the resulting dnl binary. rpathdirs= next= for opt in $2; do if test -n "$next"; then dir="$next" dnl No need to hardcode the standard /usr/lib. 
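dnl (The test below skips exactly those standard directories.  As an
dnl illustrative aside with hypothetical variable names, a configure.ac
dnl could invoke
dnl   AC_LIB_LINKFLAGS_FROM_LIBS([USB_LDADD], [$USB_LDFLAGS $USB_LIBS])
dnl and this loop, which accepts both the separated "-L dir" form and the
dnl joined "-Ldir" form, would then turn any "-L/opt/usb/lib" it finds into
dnl a matching runtime-path option in USB_LDADD.)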
if test "X$dir" != "X/usr/$acl_libdirstem" \ && test "X$dir" != "X/usr/$acl_libdirstem2" \ && test "X$dir" != "X/usr/$acl_libdirstem3"; then rpathdirs="$rpathdirs $dir" fi next= else case $opt in -L) next=yes ;; -L*) dir=`echo "X$opt" | sed -e 's,^X-L,,'` dnl No need to hardcode the standard /usr/lib. if test "X$dir" != "X/usr/$acl_libdirstem" \ && test "X$dir" != "X/usr/$acl_libdirstem2" \ && test "X$dir" != "X/usr/$acl_libdirstem3"; then rpathdirs="$rpathdirs $dir" fi next= ;; *) next= ;; esac fi done if test "X$rpathdirs" != "X"; then if test -n ""$3""; then dnl libtool is used for linking. Use -R options. for dir in $rpathdirs; do $1="${$1}${$1:+ }-R$dir" done else dnl The linker is used for linking directly. if test -n "$acl_hardcode_libdir_separator"; then dnl Weird platform: only the last -rpath option counts, the user dnl must pass all path elements in one option. alldirs= for dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$dir" done acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" $1="$flag" else dnl The -rpath options are cumulative. for dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" $1="${$1}${$1:+ }$flag" done fi fi fi fi fi AC_SUBST([$1]) ]) # lib-prefix.m4 serial 17 dnl Copyright (C) 2001-2005, 2008-2020 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl From Bruno Haible. dnl AC_LIB_PREFIX adds to the CPPFLAGS and LDFLAGS the flags that are needed dnl to access previously installed libraries. The basic assumption is that dnl a user will want packages to use other packages he previously installed dnl with the same --prefix option. dnl This macro is not needed if only AC_LIB_LINKFLAGS is used to locate dnl libraries, but is otherwise very convenient. AC_DEFUN([AC_LIB_PREFIX], [ AC_BEFORE([$0], [AC_LIB_LINKFLAGS]) AC_REQUIRE([AC_PROG_CC]) AC_REQUIRE([AC_CANONICAL_HOST]) AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) dnl By default, look in $includedir and $libdir. use_additional=yes AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) AC_ARG_WITH([lib-prefix], [[ --with-lib-prefix[=DIR] search for libraries in DIR/include and DIR/lib --without-lib-prefix don't search for libraries in includedir and libdir]], [ if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) else additional_includedir="$withval/include" additional_libdir="$withval/$acl_libdirstem" fi fi ]) if test $use_additional = yes; then dnl Potentially add $additional_includedir to $CPPFLAGS. dnl But don't add it dnl 1. if it's the standard /usr/include, dnl 2. if it's already present in $CPPFLAGS, dnl 3. if it's /usr/local/include and we are using GCC on Linux, dnl 4. if it doesn't exist as a directory. 
if test "X$additional_includedir" != "X/usr/include"; then haveit= for x in $CPPFLAGS; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then if test -d "$additional_includedir"; then dnl Really add $additional_includedir to $CPPFLAGS. CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }-I$additional_includedir" fi fi fi fi dnl Potentially add $additional_libdir to $LDFLAGS. dnl But don't add it dnl 1. if it's the standard /usr/lib, dnl 2. if it's already present in $LDFLAGS, dnl 3. if it's /usr/local/lib and we are using GCC on Linux, dnl 4. if it doesn't exist as a directory. if test "X$additional_libdir" != "X/usr/$acl_libdirstem"; then haveit= for x in $LDFLAGS; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem"; then if test -n "$GCC"; then case $host_os in linux*) haveit=yes;; esac fi fi if test -z "$haveit"; then if test -d "$additional_libdir"; then dnl Really add $additional_libdir to $LDFLAGS. LDFLAGS="${LDFLAGS}${LDFLAGS:+ }-L$additional_libdir" fi fi fi fi fi ]) dnl AC_LIB_PREPARE_PREFIX creates variables acl_final_prefix, dnl acl_final_exec_prefix, containing the values to which $prefix and dnl $exec_prefix will expand at the end of the configure script. AC_DEFUN([AC_LIB_PREPARE_PREFIX], [ dnl Unfortunately, prefix and exec_prefix get only finally determined dnl at the end of configure. if test "X$prefix" = "XNONE"; then acl_final_prefix="$ac_default_prefix" else acl_final_prefix="$prefix" fi if test "X$exec_prefix" = "XNONE"; then acl_final_exec_prefix='${prefix}' else acl_final_exec_prefix="$exec_prefix" fi acl_save_prefix="$prefix" prefix="$acl_final_prefix" eval acl_final_exec_prefix=\"$acl_final_exec_prefix\" prefix="$acl_save_prefix" ]) dnl AC_LIB_WITH_FINAL_PREFIX([statement]) evaluates statement, with the dnl variables prefix and exec_prefix bound to the values they will have dnl at the end of the configure script. AC_DEFUN([AC_LIB_WITH_FINAL_PREFIX], [ acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" $1 exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" ]) dnl AC_LIB_PREPARE_MULTILIB creates dnl - a function acl_is_expected_elfclass, that tests whether standard input dn; has a 32-bit or 64-bit ELF header, depending on the host CPU ABI, dnl - 3 variables acl_libdirstem, acl_libdirstem2, acl_libdirstem3, containing dnl the basename of the libdir to try in turn, either "lib" or "lib64" or dnl "lib/64" or "lib32" or "lib/sparcv9" or "lib/amd64" or similar. AC_DEFUN([AC_LIB_PREPARE_MULTILIB], [ dnl There is no formal standard regarding lib, lib32, and lib64. dnl On most glibc systems, the current practice is that on a system supporting dnl 32-bit and 64-bit instruction sets or ABIs, 64-bit libraries go under dnl $prefix/lib64 and 32-bit libraries go under $prefix/lib. However, on dnl Arch Linux based distributions, it's the opposite: 32-bit libraries go dnl under $prefix/lib32 and 64-bit libraries go under $prefix/lib. dnl We determine the compiler's default mode by looking at the compiler's dnl library search path. 
If at least one of its elements ends in /lib64 or dnl points to a directory whose absolute pathname ends in /lib64, we use that dnl for 64-bit ABIs. Similarly for 32-bit ABIs. Otherwise we use the default, dnl namely "lib". dnl On Solaris systems, the current practice is that on a system supporting dnl 32-bit and 64-bit instruction sets or ABIs, 64-bit libraries go under dnl $prefix/lib/64 (which is a symlink to either $prefix/lib/sparcv9 or dnl $prefix/lib/amd64) and 32-bit libraries go under $prefix/lib. AC_REQUIRE([AC_CANONICAL_HOST]) AC_REQUIRE([gl_HOST_CPU_C_ABI_32BIT]) AC_CACHE_CHECK([for ELF binary format], [gl_cv_elf], [AC_EGREP_CPP([Extensible Linking Format], [#ifdef __ELF__ Extensible Linking Format #endif ], [gl_cv_elf=yes], [gl_cv_elf=no]) ]) if test $gl_cv_elf; then # Extract the ELF class of a file (5th byte) in decimal. # Cf. https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header if od -A x < /dev/null >/dev/null 2>/dev/null; then # Use POSIX od. func_elfclass () { od -A n -t d1 -j 4 -N 1 } else # Use BSD hexdump. func_elfclass () { dd bs=1 count=1 skip=4 2>/dev/null | hexdump -e '1/1 "%3d "' echo } fi changequote(,)dnl case $HOST_CPU_C_ABI_32BIT in yes) # 32-bit ABI. acl_is_expected_elfclass () { test "`func_elfclass | sed -e 's/[ ]//g'`" = 1 } ;; no) # 64-bit ABI. acl_is_expected_elfclass () { test "`func_elfclass | sed -e 's/[ ]//g'`" = 2 } ;; *) # Unknown. acl_is_expected_elfclass () { : } ;; esac changequote([,])dnl else acl_is_expected_elfclass () { : } fi dnl Allow the user to override the result by setting acl_cv_libdirstems. AC_CACHE_CHECK([for the common suffixes of directories in the library search path], [acl_cv_libdirstems], [dnl Try 'lib' first, because that's the default for libdir in GNU, see dnl . acl_libdirstem=lib acl_libdirstem2= acl_libdirstem3= case "$host_os" in solaris*) dnl See Solaris 10 Software Developer Collection > Solaris 64-bit Developer's Guide > The Development Environment dnl . dnl "Portable Makefiles should refer to any library directories using the 64 symbolic link." dnl But we want to recognize the sparcv9 or amd64 subdirectory also if the dnl symlink is missing, so we set acl_libdirstem2 too. if test $HOST_CPU_C_ABI_32BIT = no; then acl_libdirstem2=lib/64 case "$host_cpu" in sparc*) acl_libdirstem3=lib/sparcv9 ;; i*86 | x86_64) acl_libdirstem3=lib/amd64 ;; esac fi ;; *) dnl If $CC generates code for a 32-bit ABI, the libraries are dnl surely under $prefix/lib or $prefix/lib32, not $prefix/lib64. dnl Similarly, if $CC generates code for a 64-bit ABI, the libraries dnl are surely under $prefix/lib or $prefix/lib64, not $prefix/lib32. dnl Find the compiler's search path. However, non-system compilers dnl sometimes have odd library search paths. But we can't simply invoke dnl '/usr/bin/gcc -print-search-dirs' because that would not take into dnl account the -m32/-m31 or -m64 options from the $CC or $CFLAGS. searchpath=`(LC_ALL=C $CC $CPPFLAGS $CFLAGS -print-search-dirs) 2>/dev/null \ | sed -n -e 's,^libraries: ,,p' | sed -e 's,^=,,'` if test $HOST_CPU_C_ABI_32BIT != no; then # 32-bit or unknown ABI. if test -d /usr/lib32; then acl_libdirstem2=lib32 fi fi if test $HOST_CPU_C_ABI_32BIT != yes; then # 64-bit or unknown ABI. 
if test -d /usr/lib64; then acl_libdirstem3=lib64 fi fi if test -n "$searchpath"; then acl_save_IFS="${IFS= }"; IFS=":" for searchdir in $searchpath; do if test -d "$searchdir"; then case "$searchdir" in */lib32/ | */lib32 ) acl_libdirstem2=lib32 ;; */lib64/ | */lib64 ) acl_libdirstem3=lib64 ;; */../ | */.. ) # Better ignore directories of this form. They are misleading. ;; *) searchdir=`cd "$searchdir" && pwd` case "$searchdir" in */lib32 ) acl_libdirstem2=lib32 ;; */lib64 ) acl_libdirstem3=lib64 ;; esac ;; esac fi done IFS="$acl_save_IFS" if test $HOST_CPU_C_ABI_32BIT = yes; then # 32-bit ABI. acl_libdirstem3= fi if test $HOST_CPU_C_ABI_32BIT = no; then # 64-bit ABI. acl_libdirstem2= fi fi ;; esac test -n "$acl_libdirstem2" || acl_libdirstem2="$acl_libdirstem" test -n "$acl_libdirstem3" || acl_libdirstem3="$acl_libdirstem" acl_cv_libdirstems="$acl_libdirstem,$acl_libdirstem2,$acl_libdirstem3" ]) dnl Decompose acl_cv_libdirstems into acl_libdirstem, acl_libdirstem2, and dnl acl_libdirstem3. changequote(,)dnl acl_libdirstem=`echo "$acl_cv_libdirstems" | sed -e 's/,.*//'` acl_libdirstem2=`echo "$acl_cv_libdirstems" | sed -e 's/^[^,]*,//' -e 's/,.*//'` acl_libdirstem3=`echo "$acl_cv_libdirstems" | sed -e 's/^[^,]*,[^,]*,//' -e 's/,.*//'` changequote([,])dnl ]) # nls.m4 serial 6 (gettext-0.20.2) dnl Copyright (C) 1995-2003, 2005-2006, 2008-2014, 2016, 2019-2020 Free dnl Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl dnl This file can be used in projects which are not available under dnl the GNU General Public License or the GNU Lesser General Public dnl License but which still want to provide support for the GNU gettext dnl functionality. dnl Please note that the actual code of the GNU gettext library is covered dnl by the GNU Lesser General Public License, and the rest of the GNU dnl gettext package is covered by the GNU General Public License. dnl They are *not* in the public domain. dnl Authors: dnl Ulrich Drepper , 1995-2000. dnl Bruno Haible , 2000-2003. AC_PREREQ([2.50]) AC_DEFUN([AM_NLS], [ AC_MSG_CHECKING([whether NLS is requested]) dnl Default is enabled NLS AC_ARG_ENABLE([nls], [ --disable-nls do not use Native Language Support], USE_NLS=$enableval, USE_NLS=yes) AC_MSG_RESULT([$USE_NLS]) AC_SUBST([USE_NLS]) ]) # pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*- # serial 12 (pkg-config-0.29.2) dnl Copyright © 2004 Scott James Remnant . dnl Copyright © 2012-2015 Dan Nicholson dnl dnl This program is free software; you can redistribute it and/or modify dnl it under the terms of the GNU General Public License as published by dnl the Free Software Foundation; either version 2 of the License, or dnl (at your option) any later version. dnl dnl This program is distributed in the hope that it will be useful, but dnl WITHOUT ANY WARRANTY; without even the implied warranty of dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU dnl General Public License for more details. dnl dnl You should have received a copy of the GNU General Public License dnl along with this program; if not, write to the Free Software dnl Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA dnl 02111-1307, USA. 
dnl dnl As a special exception to the GNU General Public License, if you dnl distribute this file as part of a program that contains a dnl configuration script generated by Autoconf, you may include it under dnl the same distribution terms that you use for the rest of that dnl program. dnl PKG_PREREQ(MIN-VERSION) dnl ----------------------- dnl Since: 0.29 dnl dnl Verify that the version of the pkg-config macros are at least dnl MIN-VERSION. Unlike PKG_PROG_PKG_CONFIG, which checks the user's dnl installed version of pkg-config, this checks the developer's version dnl of pkg.m4 when generating configure. dnl dnl To ensure that this macro is defined, also add: dnl m4_ifndef([PKG_PREREQ], dnl [m4_fatal([must install pkg-config 0.29 or later before running autoconf/autogen])]) dnl dnl See the "Since" comment for each macro you use to see what version dnl of the macros you require. m4_defun([PKG_PREREQ], [m4_define([PKG_MACROS_VERSION], [0.29.2]) m4_if(m4_version_compare(PKG_MACROS_VERSION, [$1]), -1, [m4_fatal([pkg.m4 version $1 or higher is required but ]PKG_MACROS_VERSION[ found])]) ])dnl PKG_PREREQ dnl PKG_PROG_PKG_CONFIG([MIN-VERSION]) dnl ---------------------------------- dnl Since: 0.16 dnl dnl Search for the pkg-config tool and set the PKG_CONFIG variable to dnl first found in the path. Checks that the version of pkg-config found dnl is at least MIN-VERSION. If MIN-VERSION is not specified, 0.9.0 is dnl used since that's the first version where most current features of dnl pkg-config existed. AC_DEFUN([PKG_PROG_PKG_CONFIG], [m4_pattern_forbid([^_?PKG_[A-Z_]+$]) m4_pattern_allow([^PKG_CONFIG(_(PATH|LIBDIR|SYSROOT_DIR|ALLOW_SYSTEM_(CFLAGS|LIBS)))?$]) m4_pattern_allow([^PKG_CONFIG_(DISABLE_UNINSTALLED|TOP_BUILD_DIR|DEBUG_SPEW)$]) AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility]) AC_ARG_VAR([PKG_CONFIG_PATH], [directories to add to pkg-config's search path]) AC_ARG_VAR([PKG_CONFIG_LIBDIR], [path overriding pkg-config's built-in search path]) if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) fi if test -n "$PKG_CONFIG"; then _pkg_min_version=m4_default([$1], [0.9.0]) AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version]) if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) PKG_CONFIG="" fi fi[]dnl ])dnl PKG_PROG_PKG_CONFIG dnl PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) dnl ------------------------------------------------------------------- dnl Since: 0.18 dnl dnl Check to see whether a particular set of modules exists. Similar to dnl PKG_CHECK_MODULES(), but does not set variables or print errors. dnl dnl Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG]) dnl only at the first occurence in configure.ac, so if the first place dnl it's called might be skipped (such as if it is within an "if", you dnl have to call PKG_CHECK_EXISTS manually AC_DEFUN([PKG_CHECK_EXISTS], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl if test -n "$PKG_CONFIG" && \ AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then m4_default([$2], [:]) m4_ifvaln([$3], [else $3])dnl fi]) dnl _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES]) dnl --------------------------------------------- dnl Internal wrapper calling pkg-config via PKG_CONFIG and setting dnl pkg_failed based on the result. m4_define([_PKG_CONFIG], [if test -n "$$1"; then pkg_cv_[]$1="$$1" elif test -n "$PKG_CONFIG"; then PKG_CHECK_EXISTS([$3], [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes ], [pkg_failed=yes]) else pkg_failed=untried fi[]dnl ])dnl _PKG_CONFIG dnl _PKG_SHORT_ERRORS_SUPPORTED dnl --------------------------- dnl Internal check to see if pkg-config supports short errors. AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED], [AC_REQUIRE([PKG_PROG_PKG_CONFIG]) if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi[]dnl ])dnl _PKG_SHORT_ERRORS_SUPPORTED dnl PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], dnl [ACTION-IF-NOT-FOUND]) dnl -------------------------------------------------------------- dnl Since: 0.4.0 dnl dnl Note that if there is a possibility the first call to dnl PKG_CHECK_MODULES might not happen, you should be sure to include an dnl explicit call to PKG_PROG_PKG_CONFIG in your configure.ac AC_DEFUN([PKG_CHECK_MODULES], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl pkg_failed=no AC_MSG_CHECKING([for $2]) _PKG_CONFIG([$1][_CFLAGS], [cflags], [$2]) _PKG_CONFIG([$1][_LIBS], [libs], [$2]) m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS and $1[]_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details.]) if test $pkg_failed = yes; then AC_MSG_RESULT([no]) _PKG_SHORT_ERRORS_SUPPORTED if test $_pkg_short_errors_supported = yes; then $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$2" 2>&1` else $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$2" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD m4_default([$4], [AC_MSG_ERROR( [Package requirements ($2) were not met: $$1_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. _PKG_TEXT])[]dnl ]) elif test $pkg_failed = untried; then AC_MSG_RESULT([no]) m4_default([$4], [AC_MSG_FAILURE( [The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. _PKG_TEXT To get pkg-config, see .])[]dnl ]) else $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS $1[]_LIBS=$pkg_cv_[]$1[]_LIBS AC_MSG_RESULT([yes]) $3 fi[]dnl ])dnl PKG_CHECK_MODULES dnl PKG_CHECK_MODULES_STATIC(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], dnl [ACTION-IF-NOT-FOUND]) dnl --------------------------------------------------------------------- dnl Since: 0.29 dnl dnl Checks for existence of MODULES and gathers its build flags with dnl static libraries enabled. Sets VARIABLE-PREFIX_CFLAGS from --cflags dnl and VARIABLE-PREFIX_LIBS from --libs. dnl dnl Note that if there is a possibility the first call to dnl PKG_CHECK_MODULES_STATIC might not happen, you should be sure to dnl include an explicit call to PKG_PROG_PKG_CONFIG in your dnl configure.ac. AC_DEFUN([PKG_CHECK_MODULES_STATIC], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl _save_PKG_CONFIG=$PKG_CONFIG PKG_CONFIG="$PKG_CONFIG --static" PKG_CHECK_MODULES($@) PKG_CONFIG=$_save_PKG_CONFIG[]dnl ])dnl PKG_CHECK_MODULES_STATIC dnl PKG_INSTALLDIR([DIRECTORY]) dnl ------------------------- dnl Since: 0.27 dnl dnl Substitutes the variable pkgconfigdir as the location where a module dnl should install pkg-config .pc files. By default the directory is dnl $libdir/pkgconfig, but the default can be changed by passing dnl DIRECTORY. 
The user can override through the --with-pkgconfigdir dnl parameter. AC_DEFUN([PKG_INSTALLDIR], [m4_pushdef([pkg_default], [m4_default([$1], ['${libdir}/pkgconfig'])]) m4_pushdef([pkg_description], [pkg-config installation directory @<:@]pkg_default[@:>@]) AC_ARG_WITH([pkgconfigdir], [AS_HELP_STRING([--with-pkgconfigdir], pkg_description)],, [with_pkgconfigdir=]pkg_default) AC_SUBST([pkgconfigdir], [$with_pkgconfigdir]) m4_popdef([pkg_default]) m4_popdef([pkg_description]) ])dnl PKG_INSTALLDIR dnl PKG_NOARCH_INSTALLDIR([DIRECTORY]) dnl -------------------------------- dnl Since: 0.27 dnl dnl Substitutes the variable noarch_pkgconfigdir as the location where a dnl module should install arch-independent pkg-config .pc files. By dnl default the directory is $datadir/pkgconfig, but the default can be dnl changed by passing DIRECTORY. The user can override through the dnl --with-noarch-pkgconfigdir parameter. AC_DEFUN([PKG_NOARCH_INSTALLDIR], [m4_pushdef([pkg_default], [m4_default([$1], ['${datadir}/pkgconfig'])]) m4_pushdef([pkg_description], [pkg-config arch-independent installation directory @<:@]pkg_default[@:>@]) AC_ARG_WITH([noarch-pkgconfigdir], [AS_HELP_STRING([--with-noarch-pkgconfigdir], pkg_description)],, [with_noarch_pkgconfigdir=]pkg_default) AC_SUBST([noarch_pkgconfigdir], [$with_noarch_pkgconfigdir]) m4_popdef([pkg_default]) m4_popdef([pkg_description]) ])dnl PKG_NOARCH_INSTALLDIR dnl PKG_CHECK_VAR(VARIABLE, MODULE, CONFIG-VARIABLE, dnl [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) dnl ------------------------------------------- dnl Since: 0.28 dnl dnl Retrieves the value of the pkg-config variable for the given module. AC_DEFUN([PKG_CHECK_VAR], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl AC_ARG_VAR([$1], [value of $3 for $2, overriding pkg-config])dnl _PKG_CONFIG([$1], [variable="][$3]["], [$2]) AS_VAR_COPY([$1], [pkg_cv_][$1]) AS_VAR_IF([$1], [""], [$5], [$4])dnl ])dnl PKG_CHECK_VAR # po.m4 serial 31 (gettext-0.20.2) dnl Copyright (C) 1995-2014, 2016, 2018-2020 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl dnl This file can be used in projects which are not available under dnl the GNU General Public License or the GNU Lesser General Public dnl License but which still want to provide support for the GNU gettext dnl functionality. dnl Please note that the actual code of the GNU gettext library is covered dnl by the GNU Lesser General Public License, and the rest of the GNU dnl gettext package is covered by the GNU General Public License. dnl They are *not* in the public domain. dnl Authors: dnl Ulrich Drepper , 1995-2000. dnl Bruno Haible , 2000-2003. AC_PREREQ([2.60]) dnl Checks for all prerequisites of the po subdirectory. AC_DEFUN([AM_PO_SUBDIRS], [ AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AC_PROG_INSTALL])dnl AC_REQUIRE([AC_PROG_MKDIR_P])dnl AC_REQUIRE([AC_PROG_SED])dnl AC_REQUIRE([AM_NLS])dnl dnl Release version of the gettext macros. This is used to ensure that dnl the gettext macros and po/Makefile.in.in are in sync. AC_SUBST([GETTEXT_MACRO_VERSION], [0.20]) dnl Perform the following tests also if --disable-nls has been given, dnl because they are needed for "make dist" to work. dnl Search for GNU msgfmt in the PATH. dnl The first test excludes Solaris msgfmt and early GNU msgfmt versions. dnl The second test excludes FreeBSD msgfmt. 
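dnl (Descriptive note: both tests run the candidate as
dnl "<candidate> --statistics /dev/null"; the first accepts it only if that
dnl command succeeds, and the second rejects it if it prints a usage
dnl message, which is how the non-GNU implementations mentioned above give
dnl themselves away.)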
AM_PATH_PROG_WITH_TEST(MSGFMT, msgfmt, [$ac_dir/$ac_word --statistics /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1 && (if $ac_dir/$ac_word --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi)], :) AC_PATH_PROG([GMSGFMT], [gmsgfmt], [$MSGFMT]) dnl Test whether it is GNU msgfmt >= 0.15. changequote(,)dnl case `$GMSGFMT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) GMSGFMT_015=: ;; *) GMSGFMT_015=$GMSGFMT ;; esac changequote([,])dnl AC_SUBST([GMSGFMT_015]) dnl Search for GNU xgettext 0.12 or newer in the PATH. dnl The first test excludes Solaris xgettext and early GNU xgettext versions. dnl The second test excludes FreeBSD xgettext. AM_PATH_PROG_WITH_TEST(XGETTEXT, xgettext, [$ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1 && (if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi)], :) dnl Remove leftover from FreeBSD xgettext call. rm -f messages.po dnl Test whether it is GNU xgettext >= 0.15. changequote(,)dnl case `$XGETTEXT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) XGETTEXT_015=: ;; *) XGETTEXT_015=$XGETTEXT ;; esac changequote([,])dnl AC_SUBST([XGETTEXT_015]) dnl Search for GNU msgmerge 0.11 or newer in the PATH. AM_PATH_PROG_WITH_TEST(MSGMERGE, msgmerge, [$ac_dir/$ac_word --update -q /dev/null /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1], :) dnl Test whether it is GNU msgmerge >= 0.20. if LC_ALL=C $MSGMERGE --help | grep ' --for-msgfmt ' >/dev/null; then MSGMERGE_FOR_MSGFMT_OPTION='--for-msgfmt' else dnl Test whether it is GNU msgmerge >= 0.12. if LC_ALL=C $MSGMERGE --help | grep ' --no-fuzzy-matching ' >/dev/null; then MSGMERGE_FOR_MSGFMT_OPTION='--no-fuzzy-matching --no-location --quiet' else dnl With these old versions, $(MSGMERGE) $(MSGMERGE_FOR_MSGFMT_OPTION) is dnl slow. But this is not a big problem, as such old gettext versions are dnl hardly in use any more. MSGMERGE_FOR_MSGFMT_OPTION='--no-location --quiet' fi fi AC_SUBST([MSGMERGE_FOR_MSGFMT_OPTION]) dnl Support for AM_XGETTEXT_OPTION. test -n "${XGETTEXT_EXTRA_OPTIONS+set}" || XGETTEXT_EXTRA_OPTIONS= AC_SUBST([XGETTEXT_EXTRA_OPTIONS]) AC_CONFIG_COMMANDS([po-directories], [[ for ac_file in $CONFIG_FILES; do # Support "outfile[:infile[:infile...]]" case "$ac_file" in *:*) ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;; esac # PO directories have a Makefile.in generated from Makefile.in.in. case "$ac_file" in */Makefile.in) # Adjust a relative srcdir. ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` ac_dir_suffix=/`echo "$ac_dir"|sed 's%^\./%%'` ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` # In autoconf-2.13 it is called $ac_given_srcdir. # In autoconf-2.50 it is called $srcdir. test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" case "$ac_given_srcdir" in .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; /*) top_srcdir="$ac_given_srcdir" ;; *) top_srcdir="$ac_dots$ac_given_srcdir" ;; esac # Treat a directory as a PO directory if and only if it has a # POTFILES.in file. This allows packages to have multiple PO # directories under different names or in different locations. 
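# (Illustrative aside, not part of the original gettext machinery; the file
# names are hypothetical.  A minimal po/POTFILES.in simply lists the source
# files containing translatable strings, one per line, e.g.
#   src/main.cpp
# while po/LINGUAS names the available translations, e.g. "de fr ja".  Both
# are read from the source directory by the code that follows.)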
if test -f "$ac_given_srcdir/$ac_dir/POTFILES.in"; then rm -f "$ac_dir/POTFILES" test -n "$as_me" && echo "$as_me: creating $ac_dir/POTFILES" || echo "creating $ac_dir/POTFILES" gt_tab=`printf '\t'` cat "$ac_given_srcdir/$ac_dir/POTFILES.in" | sed -e "/^#/d" -e "/^[ ${gt_tab}]*\$/d" -e "s,.*, $top_srcdir/& \\\\," | sed -e "\$s/\(.*\) \\\\/\1/" > "$ac_dir/POTFILES" POMAKEFILEDEPS="POTFILES.in" # ALL_LINGUAS, POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES depend # on $ac_dir but don't depend on user-specified configuration # parameters. if test -f "$ac_given_srcdir/$ac_dir/LINGUAS"; then # The LINGUAS file contains the set of available languages. if test -n "$OBSOLETE_ALL_LINGUAS"; then test -n "$as_me" && echo "$as_me: setting ALL_LINGUAS in configure.in is obsolete" || echo "setting ALL_LINGUAS in configure.in is obsolete" fi ALL_LINGUAS=`sed -e "/^#/d" -e "s/#.*//" "$ac_given_srcdir/$ac_dir/LINGUAS"` POMAKEFILEDEPS="$POMAKEFILEDEPS LINGUAS" else # The set of available languages was given in configure.in. ALL_LINGUAS=$OBSOLETE_ALL_LINGUAS fi # Compute POFILES # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).po) # Compute UPDATEPOFILES # as $(foreach lang, $(ALL_LINGUAS), $(lang).po-update) # Compute DUMMYPOFILES # as $(foreach lang, $(ALL_LINGUAS), $(lang).nop) # Compute GMOFILES # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).gmo) case "$ac_given_srcdir" in .) srcdirpre= ;; *) srcdirpre='$(srcdir)/' ;; esac POFILES= UPDATEPOFILES= DUMMYPOFILES= GMOFILES= for lang in $ALL_LINGUAS; do POFILES="$POFILES $srcdirpre$lang.po" UPDATEPOFILES="$UPDATEPOFILES $lang.po-update" DUMMYPOFILES="$DUMMYPOFILES $lang.nop" GMOFILES="$GMOFILES $srcdirpre$lang.gmo" done # CATALOGS depends on both $ac_dir and the user's LINGUAS # environment variable. INST_LINGUAS= if test -n "$ALL_LINGUAS"; then for presentlang in $ALL_LINGUAS; do useit=no if test "%UNSET%" != "$LINGUAS"; then desiredlanguages="$LINGUAS" else desiredlanguages="$ALL_LINGUAS" fi for desiredlang in $desiredlanguages; do # Use the presentlang catalog if desiredlang is # a. equal to presentlang, or # b. a variant of presentlang (because in this case, # presentlang can be used as a fallback for messages # which are not translated in the desiredlang catalog). case "$desiredlang" in "$presentlang"*) useit=yes;; esac done if test $useit = yes; then INST_LINGUAS="$INST_LINGUAS $presentlang" fi done fi CATALOGS= if test -n "$INST_LINGUAS"; then for lang in $INST_LINGUAS; do CATALOGS="$CATALOGS $lang.gmo" done fi test -n "$as_me" && echo "$as_me: creating $ac_dir/Makefile" || echo "creating $ac_dir/Makefile" sed -e "/^POTFILES =/r $ac_dir/POTFILES" -e "/^# Makevars/r $ac_given_srcdir/$ac_dir/Makevars" -e "s|@POFILES@|$POFILES|g" -e "s|@UPDATEPOFILES@|$UPDATEPOFILES|g" -e "s|@DUMMYPOFILES@|$DUMMYPOFILES|g" -e "s|@GMOFILES@|$GMOFILES|g" -e "s|@CATALOGS@|$CATALOGS|g" -e "s|@POMAKEFILEDEPS@|$POMAKEFILEDEPS|g" "$ac_dir/Makefile.in" > "$ac_dir/Makefile" for f in "$ac_given_srcdir/$ac_dir"/Rules-*; do if test -f "$f"; then case "$f" in *.orig | *.bak | *~) ;; *) cat "$f" >> "$ac_dir/Makefile" ;; esac fi done fi ;; esac done]], [# Capture the value of obsolete ALL_LINGUAS because we need it to compute # POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES, CATALOGS. OBSOLETE_ALL_LINGUAS="$ALL_LINGUAS" # Capture the value of LINGUAS because we need it to compute CATALOGS. LINGUAS="${LINGUAS-%UNSET%}" ]) ]) dnl Postprocesses a Makefile in a directory containing PO files. 
AC_DEFUN([AM_POSTPROCESS_PO_MAKEFILE], [ # When this code is run, in config.status, two variables have already been # set: # - OBSOLETE_ALL_LINGUAS is the value of LINGUAS set in configure.in, # - LINGUAS is the value of the environment variable LINGUAS at configure # time. changequote(,)dnl # Adjust a relative srcdir. ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` ac_dir_suffix=/`echo "$ac_dir"|sed 's%^\./%%'` ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` # In autoconf-2.13 it is called $ac_given_srcdir. # In autoconf-2.50 it is called $srcdir. test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" case "$ac_given_srcdir" in .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; /*) top_srcdir="$ac_given_srcdir" ;; *) top_srcdir="$ac_dots$ac_given_srcdir" ;; esac # Find a way to echo strings without interpreting backslash. if test "X`(echo '\t') 2>/dev/null`" = 'X\t'; then gt_echo='echo' else if test "X`(printf '%s\n' '\t') 2>/dev/null`" = 'X\t'; then gt_echo='printf %s\n' else echo_func () { cat < "$ac_file.tmp" tab=`printf '\t'` if grep -l '@TCLCATALOGS@' "$ac_file" > /dev/null; then # Add dependencies that cannot be formulated as a simple suffix rule. for lang in $ALL_LINGUAS; do frobbedlang=`echo $lang | sed -e 's/\..*$//' -e 'y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/'` cat >> "$ac_file.tmp" < /dev/null; then # Add dependencies that cannot be formulated as a simple suffix rule. for lang in $ALL_LINGUAS; do frobbedlang=`echo $lang | sed -e 's/_/-/g' -e 's/^sr-CS/sr-SP/' -e 's/@latin$/-Latn/' -e 's/@cyrillic$/-Cyrl/' -e 's/^sr-SP$/sr-SP-Latn/' -e 's/^uz-UZ$/uz-UZ-Latn/'` cat >> "$ac_file.tmp" <> "$ac_file.tmp" <, 1996. AC_PREREQ([2.50]) # Search path for a program which passes the given test. dnl AM_PATH_PROG_WITH_TEST(VARIABLE, PROG-TO-CHECK-FOR, dnl TEST-PERFORMED-ON-FOUND_PROGRAM [, VALUE-IF-NOT-FOUND [, PATH]]) AC_DEFUN([AM_PATH_PROG_WITH_TEST], [ # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then # Determine PATH_SEPARATOR by trying to find /bin/sh in a PATH which # contains only /bin. Note that ksh looks also at the FPATH variable, # so we have to set that as well for the test. PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ || PATH_SEPARATOR=';' } fi # Find out how to test for executable files. Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. cat >conf$$.file <<_ASEOF #! /bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "$2", so it can be a program name with args. set dummy $2; ac_word=[$]2 AC_MSG_CHECKING([for $ac_word]) AC_CACHE_VAL([ac_cv_path_$1], [case "[$]$1" in [[\\/]]* | ?:[[\\/]]*) ac_cv_path_$1="[$]$1" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in ifelse([$5], , $PATH, [$5]); do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then echo "$as_me: trying $ac_dir/$ac_word..." >&AS_MESSAGE_LOG_FD if [$3]; then ac_cv_path_$1="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" dnl If no 4th arg is given, leave the cache variable unset, dnl so AC_PATH_PROGS will keep looking. 
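dnl (Illustrative note: the gettext macros above call this macro with ":"
dnl as the VALUE-IF-NOT-FOUND argument, e.g.
dnl   AM_PATH_PROG_WITH_TEST(MSGMERGE, msgmerge, [...], :)
dnl so that when no suitable program exists the variable is set to the
dnl shell no-op ":" and build rules that invoke it succeed harmlessly.)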
ifelse([$4], , , [ test -z "[$]ac_cv_path_$1" && ac_cv_path_$1="$4" ])dnl ;; esac])dnl $1="$ac_cv_path_$1" if test ifelse([$4], , [-n "[$]$1"], ["[$]$1" != "$4"]); then AC_MSG_RESULT([$][$1]) else AC_MSG_RESULT([no]) fi AC_SUBST([$1])dnl ]) m4_include([Makeup/ac-fragments/makeup.m4]) m4_include([Makeup/ac-fragments/pthread.m4]) m4_include([Makeup/ac-fragments/tr1.m4]) bit-babbler-0.9/configure0000755000000000000000000162721014136173163012342 0ustar #! /bin/sh # From configure.ac generated by Makeup 0.38. # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.69 for bit-babbler 0.9. # # Report bugs to . # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # # # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. # # Copyright (C) 2003 - 2021, Ron ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. 
if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 as_fn_exit 255 fi # We don't want this to propagate to other subprocesses. { _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. fi test x\$exitcode = x0 || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 test \$(( 1 + 1 )) = 2 || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org and ron@debian.org $0: about your system, including any error possibly output $0: before this message. Then install a modern shell, or $0: manually run the script under such a shell if you do $0: have one." fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. 
:-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" test -n "$DJDIR" || exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME='bit-babbler' PACKAGE_TARNAME='bit-babbler' PACKAGE_VERSION='0.9' PACKAGE_STRING='bit-babbler 0.9' PACKAGE_BUGREPORT='ron@debian.org' PACKAGE_URL='' ac_unique_file="Makeup/Makeup.conf" # Factoring default headers for most tests. 
ac_includes_default="\ #include #ifdef HAVE_SYS_TYPES_H # include #endif #ifdef HAVE_SYS_STAT_H # include #endif #ifdef STDC_HEADERS # include # include #else # ifdef HAVE_STDLIB_H # include # endif #endif #ifdef HAVE_STRING_H # if !defined STDC_HEADERS && defined HAVE_MEMORY_H # include # endif # include #endif #ifdef HAVE_STRINGS_H # include #endif #ifdef HAVE_INTTYPES_H # include #endif #ifdef HAVE_STDINT_H # include #endif #ifdef HAVE_UNISTD_H # include #endif" gt_needs= ac_subst_vars='LTLIBOBJS LIBOBJS MAKEUP_FLAVOUR_HEADER MAKEUP_PLATFORM_HEADER LIBVIRT_SOCKET USB_LIBS USB_LDFLAGS USB_CPPFLAGS LIBUSB_DIR THREAD_STACK_SIZE SEEDD_CONTROL_SOCKET SYSCTL_DIR SYSTEMD_UNIT_DIR UDEV_RULES_DIR UDEV_LIBS UDEV_CPPFLAGS PKG_CONFIG_LIBDIR PKG_CONFIG_PATH PKG_CONFIG mu_path_iconv POSUB LTLIBINTL LIBINTL INTLLIBS LTLIBICONV LIBICONV INTL_MACOSX_LIBS XGETTEXT_EXTRA_OPTIONS MSGMERGE_FOR_MSGFMT_OPTION MSGMERGE XGETTEXT_015 XGETTEXT GMSGFMT_015 GMSGFMT MSGFMT GETTEXT_MACRO_VERSION USE_NLS SED MKDIR_P SET_MAKE GETTEXT_MSG_SRC ALL_LINGUAS MSGINIT XGETTEXT_ARGS EGREP GREP GENHTML LCOV INSTALL_DATA INSTALL_SCRIPT INSTALL_PROGRAM WINDRES AR RANLIB YACC LEXLIB LEX_OUTPUT_ROOT LEX CXXCPP ac_ct_CXX CXXFLAGS CXX CPP OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC WINRCFLAGS EXP_LOCALEDIR EXP_MANDIR EXP_DOCDIR EXP_DATADIR EXP_LIBDIR EXP_INCLUDEDIR EXP_SBINDIR EXP_BINDIR EXP_EXEC_PREFIX EXP_PREFIX SYSTEM_RUNDIR DSOEXT MAKEUP_DEFAULT_LINKAGE MAKEUP_HOST_ARCH EXTRALIBS EXTRALEXFLAGS EXTRAYACCFLAGS EXTRALDFLAGS EXTRACXXFLAGS EXTRACFLAGS EXTRACPPFLAGS PTHREAD_LDFLAGS PTHREAD_CPPFLAGS HOST_PICFLAGS PICFLAGS LEXFLAGS YACCFLAGS ARFLAGS RC_SEP CXX_SEARCH CC_SEARCH CXX_STANDARD C_STANDARD host_os host_vendor host_cpu host build_os build_vendor build_cpu build LN_S target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir runstatedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking enable_pipe enable_optimisation enable_debug enable_profile enable_extra_warnings enable_werror enable_valgrind_friendly enable_bison_deprecated_warnings enable_code_suggestions enable_clang_almost_everything enable_fortify_source enable_stack_protector enable_relro enable_bind_now enable_san enable_tsan enable_shared enable_static enable_wide_strings with_iconv with_gettext enable_nls with_gnu_ld enable_rpath with_libiconv_prefix with_libintl_prefix with_udev enable_systemd enable_sysctl ' ac_precious_vars='build_alias host_alias target_alias C_STANDARD CXX_STANDARD CC_SEARCH CXX_SEARCH RC_SEP ARFLAGS YACCFLAGS LEXFLAGS PICFLAGS HOST_PICFLAGS PTHREAD_CPPFLAGS PTHREAD_LDFLAGS EXTRACPPFLAGS EXTRACFLAGS EXTRACXXFLAGS EXTRALDFLAGS EXTRAYACCFLAGS EXTRALEXFLAGS EXTRALIBS MAKEUP_HOST_ARCH MAKEUP_DEFAULT_LINKAGE DSOEXT SYSTEM_RUNDIR EXP_PREFIX EXP_EXEC_PREFIX EXP_BINDIR EXP_SBINDIR EXP_INCLUDEDIR EXP_LIBDIR EXP_DATADIR EXP_DOCDIR EXP_MANDIR EXP_LOCALEDIR WINRCFLAGS CC CFLAGS LDFLAGS LIBS CPPFLAGS CPP CXX CXXFLAGS CCC CXXCPP XGETTEXT_ARGS MSGINIT ALL_LINGUAS GETTEXT_MSG_SRC PKG_CONFIG PKG_CONFIG_PATH PKG_CONFIG_LIBDIR UDEV_RULES_DIR SYSTEMD_UNIT_DIR SYSCTL_DIR SEEDD_CONTROL_SOCKET THREAD_STACK_SIZE LIBUSB_DIR USB_CPPFLAGS USB_LDFLAGS LIBVIRT_SOCKET MAKEUP_PLATFORM_HEADER MAKEUP_FLAVOUR_HEADER' # Initialize 
some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' runstatedir='${localstatedir}/run' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? 
"invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) 
ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -runstatedir | --runstatedir | --runstatedi | --runstated \ | --runstate | --runstat | --runsta | --runst | --runs \ | --run | --ru | --r) ac_prev=runstatedir ;; -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ | --run=* | --ru=* | --r=*) runstatedir=$ac_optarg ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) 
ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. 
$as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? "missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir runstatedir do eval ac_val=\$$ac_var # Remove trailing slashes. case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. 
case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures bit-babbler 0.9 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. 
Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/bit-babbler] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF System types: --build=BUILD configure for building on BUILD [guessed] --host=HOST cross-compile to build programs to run on HOST [BUILD] _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in short | recursive ) echo "Configuration of bit-babbler 0.9:";; esac cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-pipe use pipes instead of temporary files for faster compilation (default yes) --enable-optimisation use compiler optimisation flags (default yes) --enable-debug enable extra debug code (default yes) --enable-profile use profiling flags (default no) --enable-extra_warnings use extra compiler warnings (default yes) --enable-werror fail on compile warnings (default yes for release builds, no for debug builds) --enable-valgrind_friendly do extra cleanup to be valgrind clean (default no) --enable-bison_deprecated_warnings let bison3 bark about deprecated bison2 constructs (default no) --enable-code_suggestions let the compiler suggest optimisation and safety changes (default yes) --enable-clang_almost_everything[=version] build with most of clang's -Weverything warnings, optionally specifying the clang version to use (default no) --enable-fortify_source[=N] compile with -D_FORTIFY_SOURCE=N (default 2 if optimisation is enabled) --enable-stack_protector[=option] build with stack protection guards (default strong), may be set to 'strong', 'all', 'explicit', or a numeric value for the ssp-buffer-size parameter --enable-relro make process memory read-only after relocation where possible (default yes) --enable-bind_now resolve all symbols at process startup so that they can be included in relro (default yes if relro is enabled) --enable-san[=sanitizer,...]
build with runtime sanitiser support (default no), pass a comma-separated list of sanitizers, else "address,undefined,float-divide-by-zero,float-cast-overflow,integer,nullability" will be used --enable-tsan shortcut option for --enable-san=thread,undefined,integer,nullability --enable-shared use dynamic linking (default yes) --enable-static use static linking (default no) --enable-wide_strings use wide characters by default for internal string storage (default NO) --disable-nls do not use Native Language Support --disable-rpath do not hardcode runtime library paths --enable-systemd install systemd unit files (default yes on Linux, else no) --enable-sysctl install sysctl configuration files (default yes on Linux, else no) Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-iconv use iconv (from glibc or libiconv) for string encoding conversion (default YES) --with-gettext use gettext (from glibc or libintl) to localise selected literal strings (default YES) --with-gnu-ld assume the C compiler uses GNU ld [default=no] --with-libiconv-prefix[=DIR] search for libiconv in DIR/include and DIR/lib --without-libiconv-prefix don't search for libiconv in includedir and libdir --with-libintl-prefix[=DIR] search for libintl in DIR/include and DIR/lib --without-libintl-prefix don't search for libintl in includedir and libdir --with-udev use libudev for device detection (default yes on Linux, else no) Some influential environment variables: C_STANDARD flags to set the compiler C standard to use CXX_STANDARD flags to set the compiler C++ standard to use CC_SEARCH space-separated list of which C compiler to prefer CXX_SEARCH space-separated list of which C++ compiler to prefer RC_SEP a hack for excluding windows resource files ARFLAGS options passed to ar YACCFLAGS options passed to bison/yacc LEXFLAGS options passed to flex/lex PICFLAGS extra flags for building dynamically linked object files HOST_PICFLAGS the PICFLAGS needed for the intended host system PTHREAD_CPPFLAGS C/C++ preprocessor flags for thread-safe builds PTHREAD_LDFLAGS C/C++ linker flags for thread-safe builds EXTRACPPFLAGS extra C preprocessor flags EXTRACFLAGS extra C compiler flags EXTRACXXFLAGS extra C++ compiler flags EXTRALDFLAGS extra linker flags EXTRAYACCFLAGS extra options passed to bison/yacc EXTRALEXFLAGS extra options passed to flex/lex EXTRALIBS extra libraries (to link before LIBS) MAKEUP_HOST_ARCH architecture that targets should be built for MAKEUP_DEFAULT_LINKAGE default linkage for binary targets DSOEXT filename extension for dynamic libraries SYSTEM_RUNDIR System directory for run-time variable data EXP_PREFIX The fully expanded $prefix path EXP_EXEC_PREFIX The fully expanded $exec_prefix path EXP_BINDIR The fully expanded $bindir path EXP_SBINDIR The fully expanded $sbindir path EXP_INCLUDEDIR The fully expanded $includedir path EXP_LIBDIR The fully expanded $libdir path EXP_DATADIR The fully expanded $datadir path EXP_DOCDIR The fully expanded $docdir path EXP_MANDIR The fully expanded $mandir path EXP_LOCALEDIR The fully expanded $localedir path WINRCFLAGS options passed to windres CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. 
-I if you have headers in a nonstandard directory CPP C preprocessor CXX C++ compiler command CXXFLAGS C++ compiler flags CXXCPP C++ preprocessor XGETTEXT_ARGS xgettext arguments MSGINIT msginit command ALL_LINGUAS The list of supported ISO 639 language codes GETTEXT_MSG_SRC Limit the search for messages to $(GETTEXT_MSG_SRC)/ PKG_CONFIG path to pkg-config utility PKG_CONFIG_PATH directories to add to pkg-config's search path PKG_CONFIG_LIBDIR path overriding pkg-config's built-in search path UDEV_RULES_DIR where to install udev rules SYSTEMD_UNIT_DIR where to install systemd units SYSCTL_DIR where to install sysctl configuration SEEDD_CONTROL_SOCKET Set the default to use for the seedd control socket THREAD_STACK_SIZE Explicitly set the per-thread stack size in kB (if non-zero) LIBUSB_DIR Path for libusb (mostly for cross-compiling) USB_CPPFLAGS Extra CPPFLAGS for libusb (mostly for cross-compiling) USB_LDFLAGS Extra LDFLAGS for libusb (mostly for cross-compiling) LIBVIRT_SOCKET Path to the libvirtd unix control socket MAKEUP_PLATFORM_HEADER platform specific config header MAKEUP_FLAVOUR_HEADER feature specific config header Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to . _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF bit-babbler configure 0.9 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. Copyright (C) 2003 - 2021, Ron _ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. 
## ## ------------------------ ## # ac_fn_c_try_compile LINENO # -------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile # ac_fn_c_try_cpp LINENO # ---------------------- # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_cpp # ac_fn_cxx_try_compile LINENO # ---------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_compile # ac_fn_cxx_try_cpp LINENO # ------------------------ # Try to preprocess conftest.$ac_ext, and return whether this succeeded. 
ac_fn_cxx_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_cpp # ac_fn_c_try_link LINENO # ----------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_link # ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using # the include files in INCLUDES and setting the cache variable VAR # accordingly. ac_fn_c_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <$2> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ( $as_echo "## ----------------------------- ## ## Report this to ron@debian.org ## ## ----------------------------- ##" ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_mongrel # ac_fn_c_try_run LINENO # ---------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. Assumes # that executables *can* be run. ac_fn_c_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? 
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_run # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists and can be compiled using the include files in # INCLUDES, setting the cache variable VAR accordingly. ac_fn_c_check_header_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_compile # ac_fn_c_check_func LINENO FUNC VAR # ---------------------------------- # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func # ac_fn_cxx_try_run LINENO # ------------------------ # Try to link conftest.$ac_ext, and return whether this succeeded. Assumes # that executables *can* be run. 
ac_fn_cxx_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_run # ac_fn_cxx_try_link LINENO # ------------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_link # ac_fn_cxx_check_func LINENO FUNC VAR # ------------------------------------ # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_cxx_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $2 /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_cxx_check_func # ac_fn_cxx_check_decl LINENO SYMBOL VAR INCLUDES # ----------------------------------------------- # Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR # accordingly. ac_fn_cxx_check_decl () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack as_decl_name=`echo $2|sed 's/ *(.*//'` as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 $as_echo_n "checking whether $as_decl_name is declared... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { #ifndef $as_decl_name #ifdef __cplusplus (void) $as_decl_use; #else (void) $as_decl_name; #endif #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_cxx_check_decl # ac_fn_cxx_check_header_mongrel LINENO HEADER VAR INCLUDES # --------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using # the include files in INCLUDES and setting the cache variable VAR # accordingly. ac_fn_cxx_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include <$2> _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ( $as_echo "## ----------------------------- ## ## Report this to ron@debian.org ## ## ----------------------------- ##" ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_cxx_check_header_mongrel cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by bit-babbler $as_me 0.9, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. ## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
$as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo $as_echo "## ---------------- ## ## Cache variables. ## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. 
## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. ## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. case $CONFIG_SITE in #(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . "$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi gt_needs="$gt_needs need-ngettext" # Check that the precious variables saved in the cache have kept the same # value. 
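# A note on the cache test used throughout this script: a check of the form
#   if ${ac_cv_something+:} false; then :
# is true exactly when the cache variable is already set, because ${var+:}
# expands to the no-op command `:' (making the trailing `false' just an
# ignored argument) when var has a value, and to nothing (leaving a bare
# `false') when it does not.  A minimal sketch, run in a subshell with a
# made-up variable name so it cannot affect the real configuration:
(
  mu_doc_cv_answer=42
  if ${mu_doc_cv_answer+:} false; then :
    # Reached: the cached value can be reused without redoing the check.
    mu_doc_result=$mu_doc_cv_answer
  fi
)
# The loop below applies the same idea to the "precious" variables
# (CC, CFLAGS, CXX and friends), comparing the values recorded in the
# cache file against what is in the current environment.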
ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. ## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # We need these available before macros that follow. if test -z "$LN_S"; then exec 6>/dev/null { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 $as_echo_n "checking whether ln -s works... " >&6; } LN_S=$as_ln_s if test "$LN_S" = "ln -s"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no,using $LN_S" >&5 $as_echo "no,using $LN_S" >&6; } fi exec 6>&1 fi if test -n "Makeup/ac-aux"; then _filedest=$srcdir/Makeup/ac-aux mkdir -p $_filedest else _filedest=$srcdir fi if test -n ""; then _filesources="" else _filesources="/usr/share/misc /usr/share/automake* /usr/share/libtool" fi if test ! 
-e "$_filedest/config.guess" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $_filedest/config.guess" >&5 $as_echo_n "checking for $_filedest/config.guess... " >&6; } ( cd $_filedest for d in ".." "../.." ; do if test -r "$d/config.guess" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: linking from $d/config.guess." >&5 $as_echo "linking from $d/config.guess." >&6; } $LN_S "$d/config.guess" . break fi done ) if test ! -e "$_filedest/config.guess" ; then for d in $_filesources; do if test -r "$d/config.guess" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: copying from $d/config.guess." >&5 $as_echo "copying from $d/config.guess." >&6; } cp -a "$d/config.guess" "$_filedest/config.guess" break fi done fi if test ! -e "$_filedest/config.guess" ; then as_fn_error $? "Failed to locate config.guess. Stopping." "$LINENO" 5 fi fi if test -n "Makeup/ac-aux"; then _filedest=$srcdir/Makeup/ac-aux mkdir -p $_filedest else _filedest=$srcdir fi if test -n ""; then _filesources="" else _filesources="/usr/share/misc /usr/share/automake* /usr/share/libtool" fi if test ! -e "$_filedest/config.sub" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $_filedest/config.sub" >&5 $as_echo_n "checking for $_filedest/config.sub... " >&6; } ( cd $_filedest for d in ".." "../.." ; do if test -r "$d/config.sub" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: linking from $d/config.sub." >&5 $as_echo "linking from $d/config.sub." >&6; } $LN_S "$d/config.sub" . break fi done ) if test ! -e "$_filedest/config.sub" ; then for d in $_filesources; do if test -r "$d/config.sub" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: copying from $d/config.sub." >&5 $as_echo "copying from $d/config.sub." >&6; } cp -a "$d/config.sub" "$_filedest/config.sub" break fi done fi if test ! -e "$_filedest/config.sub" ; then as_fn_error $? "Failed to locate config.sub. Stopping." "$LINENO" 5 fi fi # This used to be ..._COPY_UNLESS_LOCAL, but using the # ..._CONFIG_AUX_DIR macro below has changed what we need. if test -n "Makeup/ac-aux"; then _filedest=$srcdir/Makeup/ac-aux mkdir -p $_filedest else _filedest=$srcdir fi if test -n ""; then _filesources="" else _filesources="/usr/share/misc /usr/share/automake* /usr/share/libtool" fi if test ! -e "$_filedest/install-sh" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $_filedest/install-sh" >&5 $as_echo_n "checking for $_filedest/install-sh... " >&6; } ( cd $_filedest for d in ".." "../.." ; do if test -r "$d/install-sh" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: linking from $d/install-sh." >&5 $as_echo "linking from $d/install-sh." >&6; } $LN_S "$d/install-sh" . break fi done ) if test ! -e "$_filedest/install-sh" ; then for d in $_filesources; do if test -r "$d/install-sh" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: copying from $d/install-sh." >&5 $as_echo "copying from $d/install-sh." >&6; } cp -a "$d/install-sh" "$_filedest/install-sh" break fi done fi if test ! -e "$_filedest/install-sh" ; then as_fn_error $? "Failed to locate install-sh. Stopping." "$LINENO" 5 fi fi # We need the files above in place, else this will fail. 
ac_aux_dir= for ac_dir in Makeup/ac-aux "$srcdir"/Makeup/ac-aux; do if test -f "$ac_dir/install-sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install-sh -c" break elif test -f "$ac_dir/install.sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install.sh -c" break elif test -f "$ac_dir/shtool"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/shtool install -c" break fi done if test -z "$ac_aux_dir"; then as_fn_error $? "cannot find install-sh, install.sh, or shtool in Makeup/ac-aux \"$srcdir\"/Makeup/ac-aux" "$LINENO" 5 fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. # They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. # Find out where we're building and who we're building for. # Make sure we can run config.sub. $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 $as_echo_n "checking build system type... " >&6; } if ${ac_cv_build+:} false; then : $as_echo_n "(cached) " >&6 else ac_build_alias=$build_alias test "x$ac_build_alias" = x && ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` test "x$ac_build_alias" = x && as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 $as_echo "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; *) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; esac build=$ac_cv_build ac_save_IFS=$IFS; IFS='-' set x $ac_cv_build shift build_cpu=$1 build_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: build_os=$* IFS=$ac_save_IFS case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 $as_echo_n "checking host system type... " >&6; } if ${ac_cv_host+:} false; then : $as_echo_n "(cached) " >&6 else if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 $as_echo "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; *) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; esac host=$ac_cv_host ac_save_IFS=$IFS; IFS='-' set x $ac_cv_host shift host_cpu=$1 host_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: host_os=$* IFS=$ac_save_IFS case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac # Select the default language standard to use. CXX_STANDARD="-std=gnu++98" # Check standard args. # Check whether --enable-pipe was given. if test "${enable_pipe+set}" = set; then : enableval=$enable_pipe; mu_cv_enable_pipe=$enableval else mu_cv_enable_pipe=yes fi # Check whether --enable-optimisation was given. 
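# Each "Check whether --enable-X was given" block here is the expanded form
# of an Autoconf AC_ARG_ENABLE test: passing --enable-foo[=value] or
# --disable-foo on the configure command line sets the shell variable
# enable_foo (to "yes", "no", or the given value), and the generated test
# copies that into one of this project's mu_cv_enable_* variables, falling
# back to a built-in default when neither flag was given.  A sketch with a
# made-up option name, run in a subshell so it has no real effect:
(
  enable_demo=no                     # as if --disable-demo had been passed
  if test "${enable_demo+set}" = set; then :
    mu_cv_enable_demo=$enable_demo
  else
    mu_cv_enable_demo=yes            # default when the option is omitted
  fi
)
# The --enable-optimisation block below follows exactly this pattern.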
if test "${enable_optimisation+set}" = set; then : enableval=$enable_optimisation; mu_cv_enable_optimisation=$enableval else mu_cv_enable_optimisation=yes fi # Check whether --enable-debug was given. if test "${enable_debug+set}" = set; then : enableval=$enable_debug; mu_cv_enable_debug=$enableval else mu_cv_enable_debug=yes fi # Check whether --enable-profile was given. if test "${enable_profile+set}" = set; then : enableval=$enable_profile; mu_cv_enable_profiling=$enableval else mu_cv_enable_profiling=no fi # Check whether --enable-extra_warnings was given. if test "${enable_extra_warnings+set}" = set; then : enableval=$enable_extra_warnings; mu_cv_enable_extra_warnings=$enableval else mu_cv_enable_extra_warnings=yes fi # Check whether --enable-werror was given. if test "${enable_werror+set}" = set; then : enableval=$enable_werror; mu_cv_enable_fail_on_warning=$enableval else if test "$mu_cv_enable_debug" = yes; then : mu_cv_enable_fail_on_warning=no else mu_cv_enable_fail_on_warning=no fi fi # Check whether --enable-valgrind_friendly was given. if test "${enable_valgrind_friendly+set}" = set; then : enableval=$enable_valgrind_friendly; mu_cv_enable_valgrind_friendly=$enableval else mu_cv_enable_valgrind_friendly=no fi # Check whether --enable-bison_deprecated_warnings was given. if test "${enable_bison_deprecated_warnings+set}" = set; then : enableval=$enable_bison_deprecated_warnings; mu_cv_enable_bison_deprecated_warnings=$enableval else mu_cv_enable_bison_deprecated_warnings=no fi # Check whether --enable-code_suggestions was given. if test "${enable_code_suggestions+set}" = set; then : enableval=$enable_code_suggestions; mu_cv_enable_code_suggestions=$enableval else mu_cv_enable_code_suggestions=yes fi # Check whether --enable-clang_almost_everything was given. if test "${enable_clang_almost_everything+set}" = set; then : enableval=$enable_clang_almost_everything; mu_cv_enable_clang_almost_everything=$enableval else mu_cv_enable_clang_almost_everything=no fi if test "$mu_cv_enable_clang_almost_everything" = yes; then : mu_cv_prog_cc=${mu_cv_prog_cc:-clang} mu_cv_prog_cxx=${mu_cv_prog_cxx:-clang++} elif test "$mu_cv_enable_clang_almost_everything" != no; then : mu_cv_prog_cc=${mu_cv_prog_cc:-clang-$mu_cv_enable_clang_almost_everything} mu_cv_prog_cxx=${mu_cv_prog_cxx:-clang++-$mu_cv_enable_clang_almost_everything} fi # Check whether --enable-fortify_source was given. if test "${enable_fortify_source+set}" = set; then : enableval=$enable_fortify_source; if test "$enableval" = yes; then : mu_cv_enable_fortify_source=2 else mu_cv_enable_fortify_source=$enableval fi else if test "$mu_cv_enable_optimisation" = no; then : mu_cv_enable_fortify_source=no else mu_cv_enable_fortify_source=2 fi fi # Check whether --enable-stack_protector was given. if test "${enable_stack_protector+set}" = set; then : enableval=$enable_stack_protector; if test "$enableval" = yes; then : mu_cv_enable_stack_protector=strong else mu_cv_enable_stack_protector=$enableval fi else mu_cv_enable_stack_protector=strong fi # Check whether --enable-relro was given. if test "${enable_relro+set}" = set; then : enableval=$enable_relro; mu_cv_enable_relro=$enableval else mu_cv_enable_relro=yes fi # Check whether --enable-bind_now was given. if test "${enable_bind_now+set}" = set; then : enableval=$enable_bind_now; mu_cv_enable_bind_now=$enableval else mu_cv_enable_bind_now=$mu_cv_enable_relro fi # Check whether --enable-san was given. 
if test "${enable_san+set}" = set; then : enableval=$enable_san; if test "$enableval" = yes; then : mu_cv_enable_san="address,undefined,float-divide-by-zero,float-cast-overflow,integer,nullability" else mu_cv_enable_san=$enableval fi else mu_cv_enable_san=no fi # Check whether --enable-tsan was given. if test "${enable_tsan+set}" = set; then : enableval=$enable_tsan; if test "$enableval" = yes; then : mu_cv_enable_san="thread,undefined,integer,nullability" else mu_cv_enable_san=$enableval fi fi if test "$mu_cv_enable_san" != no; then : mu_cv_enable_fortify_source=no fi # Check whether --enable-shared was given. if test "${enable_shared+set}" = set; then : enableval=$enable_shared; mu_cv_enable_shared=$enableval else mu_cv_enable_shared=yes fi # Check whether --enable-static was given. if test "${enable_static+set}" = set; then : enableval=$enable_static; if test "$enableval" = yes; then : mu_cv_enable_shared=no fi fi acm_save_ACM_EXPAND_DIR_prefix=$prefix acm_save_ACM_EXPAND_DIR_exec_prefix=$exec_prefix if test "$prefix" = "NONE"; then : prefix=$ac_default_prefix fi if test "$exec_prefix" = "NONE"; then : exec_prefix=$prefix fi EXP_PREFIX=$prefix while test "$EXP_PREFIX" != "$acm_tmp_EXP_PREFIX"; do acm_tmp_EXP_PREFIX=$EXP_PREFIX eval EXP_PREFIX=$EXP_PREFIX done { acm_tmp_EXP_PREFIX=; unset acm_tmp_EXP_PREFIX;} cat >>confdefs.h <<_ACEOF #define EXP_PREFIX "$EXP_PREFIX" _ACEOF prefix=$acm_save_ACM_EXPAND_DIR_prefix exec_prefix=$acm_save_ACM_EXPAND_DIR_exec_prefix acm_save_ACM_EXPAND_DIR_prefix=$prefix acm_save_ACM_EXPAND_DIR_exec_prefix=$exec_prefix if test "$prefix" = "NONE"; then : prefix=$ac_default_prefix fi if test "$exec_prefix" = "NONE"; then : exec_prefix=$prefix fi EXP_EXEC_PREFIX=$exec_prefix while test "$EXP_EXEC_PREFIX" != "$acm_tmp_EXP_EXEC_PREFIX"; do acm_tmp_EXP_EXEC_PREFIX=$EXP_EXEC_PREFIX eval EXP_EXEC_PREFIX=$EXP_EXEC_PREFIX done { acm_tmp_EXP_EXEC_PREFIX=; unset acm_tmp_EXP_EXEC_PREFIX;} cat >>confdefs.h <<_ACEOF #define EXP_EXEC_PREFIX "$EXP_EXEC_PREFIX" _ACEOF prefix=$acm_save_ACM_EXPAND_DIR_prefix exec_prefix=$acm_save_ACM_EXPAND_DIR_exec_prefix acm_save_ACM_EXPAND_DIR_prefix=$prefix acm_save_ACM_EXPAND_DIR_exec_prefix=$exec_prefix if test "$prefix" = "NONE"; then : prefix=$ac_default_prefix fi if test "$exec_prefix" = "NONE"; then : exec_prefix=$prefix fi EXP_BINDIR=$bindir while test "$EXP_BINDIR" != "$acm_tmp_EXP_BINDIR"; do acm_tmp_EXP_BINDIR=$EXP_BINDIR eval EXP_BINDIR=$EXP_BINDIR done { acm_tmp_EXP_BINDIR=; unset acm_tmp_EXP_BINDIR;} cat >>confdefs.h <<_ACEOF #define EXP_BINDIR "$EXP_BINDIR" _ACEOF prefix=$acm_save_ACM_EXPAND_DIR_prefix exec_prefix=$acm_save_ACM_EXPAND_DIR_exec_prefix acm_save_ACM_EXPAND_DIR_prefix=$prefix acm_save_ACM_EXPAND_DIR_exec_prefix=$exec_prefix if test "$prefix" = "NONE"; then : prefix=$ac_default_prefix fi if test "$exec_prefix" = "NONE"; then : exec_prefix=$prefix fi EXP_SBINDIR=$sbindir while test "$EXP_SBINDIR" != "$acm_tmp_EXP_SBINDIR"; do acm_tmp_EXP_SBINDIR=$EXP_SBINDIR eval EXP_SBINDIR=$EXP_SBINDIR done { acm_tmp_EXP_SBINDIR=; unset acm_tmp_EXP_SBINDIR;} cat >>confdefs.h <<_ACEOF #define EXP_SBINDIR "$EXP_SBINDIR" _ACEOF prefix=$acm_save_ACM_EXPAND_DIR_prefix exec_prefix=$acm_save_ACM_EXPAND_DIR_exec_prefix acm_save_ACM_EXPAND_DIR_prefix=$prefix acm_save_ACM_EXPAND_DIR_exec_prefix=$exec_prefix if test "$prefix" = "NONE"; then : prefix=$ac_default_prefix fi if test "$exec_prefix" = "NONE"; then : exec_prefix=$prefix fi EXP_INCLUDEDIR=$includedir while test "$EXP_INCLUDEDIR" != "$acm_tmp_EXP_INCLUDEDIR"; do 
acm_tmp_EXP_INCLUDEDIR=$EXP_INCLUDEDIR eval EXP_INCLUDEDIR=$EXP_INCLUDEDIR done { acm_tmp_EXP_INCLUDEDIR=; unset acm_tmp_EXP_INCLUDEDIR;} cat >>confdefs.h <<_ACEOF #define EXP_INCLUDEDIR "$EXP_INCLUDEDIR" _ACEOF prefix=$acm_save_ACM_EXPAND_DIR_prefix exec_prefix=$acm_save_ACM_EXPAND_DIR_exec_prefix acm_save_ACM_EXPAND_DIR_prefix=$prefix acm_save_ACM_EXPAND_DIR_exec_prefix=$exec_prefix if test "$prefix" = "NONE"; then : prefix=$ac_default_prefix fi if test "$exec_prefix" = "NONE"; then : exec_prefix=$prefix fi EXP_LIBDIR=$libdir while test "$EXP_LIBDIR" != "$acm_tmp_EXP_LIBDIR"; do acm_tmp_EXP_LIBDIR=$EXP_LIBDIR eval EXP_LIBDIR=$EXP_LIBDIR done { acm_tmp_EXP_LIBDIR=; unset acm_tmp_EXP_LIBDIR;} cat >>confdefs.h <<_ACEOF #define EXP_LIBDIR "$EXP_LIBDIR" _ACEOF prefix=$acm_save_ACM_EXPAND_DIR_prefix exec_prefix=$acm_save_ACM_EXPAND_DIR_exec_prefix acm_save_ACM_EXPAND_DIR_prefix=$prefix acm_save_ACM_EXPAND_DIR_exec_prefix=$exec_prefix if test "$prefix" = "NONE"; then : prefix=$ac_default_prefix fi if test "$exec_prefix" = "NONE"; then : exec_prefix=$prefix fi EXP_DATADIR=$datadir while test "$EXP_DATADIR" != "$acm_tmp_EXP_DATADIR"; do acm_tmp_EXP_DATADIR=$EXP_DATADIR eval EXP_DATADIR=$EXP_DATADIR done { acm_tmp_EXP_DATADIR=; unset acm_tmp_EXP_DATADIR;} cat >>confdefs.h <<_ACEOF #define EXP_DATADIR "$EXP_DATADIR" _ACEOF prefix=$acm_save_ACM_EXPAND_DIR_prefix exec_prefix=$acm_save_ACM_EXPAND_DIR_exec_prefix acm_save_ACM_EXPAND_DIR_prefix=$prefix acm_save_ACM_EXPAND_DIR_exec_prefix=$exec_prefix if test "$prefix" = "NONE"; then : prefix=$ac_default_prefix fi if test "$exec_prefix" = "NONE"; then : exec_prefix=$prefix fi EXP_DOCDIR=$docdir while test "$EXP_DOCDIR" != "$acm_tmp_EXP_DOCDIR"; do acm_tmp_EXP_DOCDIR=$EXP_DOCDIR eval EXP_DOCDIR=$EXP_DOCDIR done { acm_tmp_EXP_DOCDIR=; unset acm_tmp_EXP_DOCDIR;} cat >>confdefs.h <<_ACEOF #define EXP_DOCDIR "$EXP_DOCDIR" _ACEOF prefix=$acm_save_ACM_EXPAND_DIR_prefix exec_prefix=$acm_save_ACM_EXPAND_DIR_exec_prefix acm_save_ACM_EXPAND_DIR_prefix=$prefix acm_save_ACM_EXPAND_DIR_exec_prefix=$exec_prefix if test "$prefix" = "NONE"; then : prefix=$ac_default_prefix fi if test "$exec_prefix" = "NONE"; then : exec_prefix=$prefix fi EXP_MANDIR=$mandir while test "$EXP_MANDIR" != "$acm_tmp_EXP_MANDIR"; do acm_tmp_EXP_MANDIR=$EXP_MANDIR eval EXP_MANDIR=$EXP_MANDIR done { acm_tmp_EXP_MANDIR=; unset acm_tmp_EXP_MANDIR;} cat >>confdefs.h <<_ACEOF #define EXP_MANDIR "$EXP_MANDIR" _ACEOF prefix=$acm_save_ACM_EXPAND_DIR_prefix exec_prefix=$acm_save_ACM_EXPAND_DIR_exec_prefix acm_save_ACM_EXPAND_DIR_prefix=$prefix acm_save_ACM_EXPAND_DIR_exec_prefix=$exec_prefix if test "$prefix" = "NONE"; then : prefix=$ac_default_prefix fi if test "$exec_prefix" = "NONE"; then : exec_prefix=$prefix fi EXP_LOCALEDIR=$localedir while test "$EXP_LOCALEDIR" != "$acm_tmp_EXP_LOCALEDIR"; do acm_tmp_EXP_LOCALEDIR=$EXP_LOCALEDIR eval EXP_LOCALEDIR=$EXP_LOCALEDIR done { acm_tmp_EXP_LOCALEDIR=; unset acm_tmp_EXP_LOCALEDIR;} cat >>confdefs.h <<_ACEOF #define EXP_LOCALEDIR "$EXP_LOCALEDIR" _ACEOF prefix=$acm_save_ACM_EXPAND_DIR_prefix exec_prefix=$acm_save_ACM_EXPAND_DIR_exec_prefix # Oddly enough, the most preferred compiler is a platform specific thing, not a # universal truth. Who could have guessed ... 
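# On OpenBSD, FreeBSD and Darwin the clang toolchain is tried first; on
# every other supported host gcc is preferred, with a plain cc/c++ as the
# last resort.  The CC_SEARCH and CXX_SEARCH lists are only filled in when
# they are empty, so they (like CC and CXX themselves) can be preset in the
# environment to override the order, e.g.
#   CC=clang CXX=clang++ ./configure
# The AC_PROG_CC machinery further below then takes the first entry of the
# list that resolves to an executable on $PATH, roughly like this sketch
# (run in a subshell, simplified from the real per-directory search):
(
  mu_doc_search="clang gcc cc"
  mu_doc_cc=
  for mu_doc_prog in $mu_doc_search; do
    if command -v "$mu_doc_prog" >/dev/null 2>&1; then
      mu_doc_cc=$mu_doc_prog
      break
    fi
  done
)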
case $host in *-*-openbsd* | *-*-freebsd* | *-*-darwin* ) if test -z "$CC_SEARCH"; then : CC_SEARCH="clang gcc cc" fi if test -z "$CXX_SEARCH"; then : CXX_SEARCH="clang++ g++ c++" fi ;; * ) if test -z "$CC_SEARCH"; then : CC_SEARCH="gcc clang cc" fi if test -z "$CXX_SEARCH"; then : CXX_SEARCH="g++ clang++ c++" fi ;; esac SYSTEM_RUNDIR="/var/run" RC_SEP="#" case $host in *-*-linux* ) MAKEUP_HOST_ARCH="ELF" makeup_build_platform="linux" DSOEXT=".so" HOST_PICFLAGS="-fPIC" SYSTEM_RUNDIR="/run" ;; *-*-*bsd* | *-*-darwin* ) MAKEUP_HOST_ARCH="ELF" makeup_build_platform="bsd" DSOEXT=".so" HOST_PICFLAGS="-fPIC" ;; *-*-cygwin* | *-*-mingw32* ) MAKEUP_HOST_ARCH="PE" makeup_build_platform="msw" DSOEXT=".dll" HOST_PICFLAGS="-D_DLL=1 -D_WINDLL=1" WINRCFLAGS="--include-dir /usr/$host_alias/include" if test -n "$mu_cv_with_wx_build_dir"; then : WINRCFLAGS="$WINRCFLAGS --include-dir $mu_cv_with_wx_build_dir/../include" fi WINRCFLAGS="$WINRCFLAGS --define __WIN32__ --define __WIN95__ --define __GNUWIN32__" RC_SEP= ;; * ) as_fn_error $? "Unknown host type. Stopping." "$LINENO" 5 ;; esac if test "$mu_cv_enable_debug" = yes; then : makeup_build_flavour=d else makeup_build_flavour=r fi if test "$mu_cv_enable_shared" = yes; then : MAKEUP_DEFAULT_LINKAGE="shared" PICFLAGS="\$(HOST_PICFLAGS)" else MAKEUP_DEFAULT_LINKAGE="static" fi # Check standard tools. CC=${CC:-$mu_cv_prog_cc} CXX=${CXX:-$mu_cv_prog_cxx} if test "${CFLAGS+set}" = set; then : if test -n "$C_STANDARD"; then : CFLAGS="$C_STANDARD${CFLAGS:+ $CFLAGS}" fi else CFLAGS=$C_STANDARD mu_use_our_cflags=yes fi if test "${CXXFLAGS+set}" = set; then : if test -n "$CXX_STANDARD"; then : CXXFLAGS="$CXX_STANDARD${CXXFLAGS:+ $CXXFLAGS}" fi else CXXFLAGS=$CXX_STANDARD mu_use_our_cxxflags=yes fi { $as_echo "$as_me:${as_lineno-$LINENO}: Using ${C_STANDARD:-toolchain default} C standard" >&5 $as_echo "$as_me: Using ${C_STANDARD:-toolchain default} C standard" >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: Using ${CXX_STANDARD:-toolchain default} C++ standard" >&5 $as_echo "$as_me: Using ${CXX_STANDARD:-toolchain default} C++ standard" >&6;} ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then for ac_prog in $CC_SEARCH do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in $CC_SEARCH do # Extract the first word of "$ac_prog", so it can be a program name with args. 
set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. 
# On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test -z "$CXX"; then if test -n "$CCC"; then CXX=$CCC else if test -n "$ac_tool_prefix"; then for ac_prog in $CXX_SEARCH do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CXX"; then ac_cv_prog_CXX="$CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CXX=$ac_cv_prog_CXX if test -n "$CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 $as_echo "$CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CXX" && break done fi if test -z "$CXX"; then ac_ct_CXX=$CXX for ac_prog in $CXX_SEARCH do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CXX"; then ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CXX="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CXX=$ac_cv_prog_ac_ct_CXX if test -n "$ac_ct_CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 $as_echo "$ac_ct_CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CXX" && break done if test "x$ac_ct_CXX" = x; then CXX="g++" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CXX=$ac_ct_CXX fi fi fi fi # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 $as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } if ${ac_cv_cxx_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_cxx_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 $as_echo "$ac_cv_cxx_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GXX=yes else GXX= fi ac_test_CXXFLAGS=${CXXFLAGS+set} ac_save_CXXFLAGS=$CXXFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 $as_echo_n "checking whether $CXX accepts -g... " >&6; } if ${ac_cv_prog_cxx_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_cxx_werror_flag=$ac_cxx_werror_flag ac_cxx_werror_flag=yes ac_cv_prog_cxx_g=no CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes else CXXFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : else ac_cxx_werror_flag=$ac_save_cxx_werror_flag CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cxx_werror_flag=$ac_save_cxx_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 $as_echo "$ac_cv_prog_cxx_g" >&6; } if test "$ac_test_CXXFLAGS" = set; then CXXFLAGS=$ac_save_CXXFLAGS elif test $ac_cv_prog_cxx_g = yes; then if test "$GXX" = yes; then CXXFLAGS="-g -O2" else CXXFLAGS="-g" fi else if test "$GXX" = yes; then CXXFLAGS="-O2" else CXXFLAGS= fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 $as_echo_n "checking how to run the C++ preprocessor... " >&6; } if test -z "$CXXCPP"; then if ${ac_cv_prog_CXXCPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CXXCPP needs to be expanded for CXXCPP in "$CXX -E" "/lib/cpp" do ac_preproc_ok=false for ac_cxx_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CXXCPP=$CXXCPP fi CXXCPP=$ac_cv_prog_CXXCPP else ac_cv_prog_CXXCPP=$CXXCPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 $as_echo "$CXXCPP" >&6; } ac_preproc_ok=false for ac_cxx_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. 
Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$C_STANDARD"; then : CPP="$CPP $C_STANDARD" fi if test -n "$CXX_STANDARD"; then : CXXCPP="$CXXCPP $CXX_STANDARD" fi for ac_prog in flex lex do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_LEX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$LEX"; then ac_cv_prog_LEX="$LEX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_LEX="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi LEX=$ac_cv_prog_LEX if test -n "$LEX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LEX" >&5 $as_echo "$LEX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$LEX" && break done test -n "$LEX" || LEX=":" if test "x$LEX" != "x:"; then cat >conftest.l <<_ACEOF %% a { ECHO; } b { REJECT; } c { yymore (); } d { yyless (1); } e { /* IRIX 6.5 flex 2.5.4 underquotes its yyless argument. */ yyless ((input () != 0)); } f { unput (yytext[0]); } . { BEGIN INITIAL; } %% #ifdef YYTEXT_POINTER extern char *yytext; #endif int main (void) { return ! yylex () + ! yywrap (); } _ACEOF { { ac_try="$LEX conftest.l" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$LEX conftest.l") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking lex output file root" >&5 $as_echo_n "checking lex output file root... " >&6; } if ${ac_cv_prog_lex_root+:} false; then : $as_echo_n "(cached) " >&6 else if test -f lex.yy.c; then ac_cv_prog_lex_root=lex.yy elif test -f lexyy.c; then ac_cv_prog_lex_root=lexyy else as_fn_error $? "cannot find output from $LEX; giving up" "$LINENO" 5 fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_lex_root" >&5 $as_echo "$ac_cv_prog_lex_root" >&6; } LEX_OUTPUT_ROOT=$ac_cv_prog_lex_root if test -z "${LEXLIB+set}"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking lex library" >&5 $as_echo_n "checking lex library... 
" >&6; } if ${ac_cv_lib_lex+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_LIBS=$LIBS ac_cv_lib_lex='none needed' for ac_lib in '' -lfl -ll; do LIBS="$ac_lib $ac_save_LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ `cat $LEX_OUTPUT_ROOT.c` _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_lex=$ac_lib fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext test "$ac_cv_lib_lex" != 'none needed' && break done LIBS=$ac_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_lex" >&5 $as_echo "$ac_cv_lib_lex" >&6; } test "$ac_cv_lib_lex" != 'none needed' && LEXLIB=$ac_cv_lib_lex fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether yytext is a pointer" >&5 $as_echo_n "checking whether yytext is a pointer... " >&6; } if ${ac_cv_prog_lex_yytext_pointer+:} false; then : $as_echo_n "(cached) " >&6 else # POSIX says lex can declare yytext either as a pointer or an array; the # default is implementation-dependent. Figure out which it is, since # not all implementations provide the %pointer and %array declarations. ac_cv_prog_lex_yytext_pointer=no ac_save_LIBS=$LIBS LIBS="$LEXLIB $ac_save_LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #define YYTEXT_POINTER 1 `cat $LEX_OUTPUT_ROOT.c` _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_prog_lex_yytext_pointer=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_lex_yytext_pointer" >&5 $as_echo "$ac_cv_prog_lex_yytext_pointer" >&6; } if test $ac_cv_prog_lex_yytext_pointer = yes; then $as_echo "#define YYTEXT_POINTER 1" >>confdefs.h fi rm -f conftest.l $LEX_OUTPUT_ROOT.c fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}bison", so it can be a program name with args. set dummy ${ac_tool_prefix}bison; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_YACC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$YACC"; then ac_cv_prog_YACC="$YACC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_YACC="${ac_tool_prefix}bison" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi YACC=$ac_cv_prog_YACC if test -n "$YACC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $YACC" >&5 $as_echo "$YACC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_YACC"; then ac_ct_YACC=$YACC # Extract the first word of "bison", so it can be a program name with args. set dummy bison; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_YACC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_YACC"; then ac_cv_prog_ac_ct_YACC="$ac_ct_YACC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_YACC="bison" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_YACC=$ac_cv_prog_ac_ct_YACC if test -n "$ac_ct_YACC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_YACC" >&5 $as_echo "$ac_ct_YACC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_YACC" = x; then YACC=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac YACC=$ac_ct_YACC fi else YACC="$ac_cv_prog_YACC" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. set dummy ${ac_tool_prefix}ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$RANLIB"; then ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi RANLIB=$ac_cv_prog_RANLIB if test -n "$RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 $as_echo "$RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_RANLIB"; then ac_ct_RANLIB=$RANLIB # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_RANLIB"; then ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_RANLIB="ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB if test -n "$ac_ct_RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 $as_echo "$ac_ct_RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_RANLIB" = x; then RANLIB=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac RANLIB=$ac_ct_RANLIB fi else RANLIB="$ac_cv_prog_RANLIB" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. 
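# Editorial note: the ar and windres probes that follow repeat the same
# generated tool-probe pattern (autoconf's AC_CHECK_TOOL) already used for
# ranlib above: look for the host-prefixed tool first ("${ac_tool_prefix}ar"),
# fall back to the bare name, and warn if that fallback happens while cross
# compiling.  A minimal hand-written sketch of the same idea follows purely
# as an illustration; it is kept commented out so the generated script is not
# changed, and the variable name "candidate" is ours, not autoconf's:
#
#   AR=
#   for candidate in "${ac_tool_prefix}ar" ar; do
#       if command -v "$candidate" >/dev/null 2>&1; then
#           AR=$candidate
#           break
#       fi
#   done
#   test -n "$AR" || AR=:    # same no-op fallback the generated code uses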
set dummy ${ac_tool_prefix}ar; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AR"; then ac_cv_prog_AR="$AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AR="${ac_tool_prefix}ar" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AR=$ac_cv_prog_AR if test -n "$AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 $as_echo "$AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_AR"; then ac_ct_AR=$AR # Extract the first word of "ar", so it can be a program name with args. set dummy ar; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_AR"; then ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_AR="ar" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_AR=$ac_cv_prog_ac_ct_AR if test -n "$ac_ct_AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 $as_echo "$ac_ct_AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_AR" = x; then AR=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac AR=$ac_ct_AR fi else AR="$ac_cv_prog_AR" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}windres", so it can be a program name with args. set dummy ${ac_tool_prefix}windres; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_WINDRES+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$WINDRES"; then ac_cv_prog_WINDRES="$WINDRES" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_WINDRES="${ac_tool_prefix}windres" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi WINDRES=$ac_cv_prog_WINDRES if test -n "$WINDRES"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $WINDRES" >&5 $as_echo "$WINDRES" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_WINDRES"; then ac_ct_WINDRES=$WINDRES # Extract the first word of "windres", so it can be a program name with args. 
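# Editorial note: windres is the Windows resource compiler, so on non-Windows
# hosts this probe will normally find nothing.  In that case the generated
# code below leaves WINDRES set to ":" (the shell no-op), so any later
# invocation simply succeeds without doing anything; how the Makeup build
# system actually consumes WINDRES is not visible in this part of the script.
# Illustration only (the file names are hypothetical):
#
#   WINDRES=:
#   $WINDRES app.rc -o app-res.o && echo "resource step skipped"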
set dummy windres; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_WINDRES+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_WINDRES"; then ac_cv_prog_ac_ct_WINDRES="$ac_ct_WINDRES" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_WINDRES="windres" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_WINDRES=$ac_cv_prog_ac_ct_WINDRES if test -n "$ac_ct_WINDRES"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_WINDRES" >&5 $as_echo "$ac_ct_WINDRES" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_WINDRES" = x; then WINDRES=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac WINDRES=$ac_ct_WINDRES fi else WINDRES="$ac_cv_prog_WINDRES" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 $as_echo_n "checking whether ln -s works... " >&6; } LN_S=$as_ln_s if test "$LN_S" = "ln -s"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 $as_echo "no, using $LN_S" >&6; } fi # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if ${ac_cv_path_install+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in #(( ./ | .// | /[cC]/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. 
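# (Editorial note: besides skipping known-bad vendor installs such as the
#  AIX and HP pwplus variants singled out here, the probe goes on to exercise
#  each remaining candidate by copying two scratch files into a scratch
#  directory and only accepts an install program whose copies all arrive
#  intact; if nothing passes, it falls back to the slower shell-script
#  installer recorded in $ac_install_sh.)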
: elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. : else rm -rf conftest.one conftest.two conftest.dir echo one > conftest.one echo two > conftest.two mkdir conftest.dir if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' for ac_prog in lcov do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_LCOV+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$LCOV"; then ac_cv_prog_LCOV="$LCOV" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_LCOV="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi LCOV=$ac_cv_prog_LCOV if test -n "$LCOV"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LCOV" >&5 $as_echo "$LCOV" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$LCOV" && break done test -n "$LCOV" || LCOV=":" for ac_prog in genhtml do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_GENHTML+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$GENHTML"; then ac_cv_prog_GENHTML="$GENHTML" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
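# Editorial note: lcov (probed just above) and genhtml (being probed here)
# are only needed to produce code-coverage reports.  Like LEX and YACC
# earlier, each defaults to ":" when it cannot be found, so their absence
# presumably just leaves any coverage targets as no-ops rather than making
# configure fail.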
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_GENHTML="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi GENHTML=$ac_cv_prog_GENHTML if test -n "$GENHTML"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GENHTML" >&5 $as_echo "$GENHTML" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$GENHTML" && break done test -n "$GENHTML" || GENHTML=":" # Configure toolchain options. if test "$mu_cv_enable_pipe" = yes; then : CFLAGS="${CFLAGS:+$CFLAGS }-pipe" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-pipe" fi if test "$mu_cv_enable_profiling" = yes; then : CFLAGS="${CFLAGS:+$CFLAGS }-pg" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-pg" LDFLAGS="${LDFLAGS:+$LDFLAGS }-pg" fi mu_common_flags= mu_cflags= mu_cxxflags= if test "$mu_cv_enable_optimisation" = yes; then : mu_common_flags="${mu_common_flags:+$mu_common_flags }-O2" fi if test "$mu_cv_enable_debug" = yes; then : mu_common_flags="${mu_common_flags:+$mu_common_flags }-g" fi if test "$mu_cv_enable_fail_on_warning" = yes; then : mu_common_flags="${mu_common_flags:+$mu_common_flags }-Werror" fi mu_common_flags="${mu_common_flags:+$mu_common_flags }-Wall" if test "$mu_cv_enable_extra_warnings" = yes; then : mu_common_flags="${mu_common_flags:+$mu_common_flags }-Wextra" mu_common_flags="${mu_common_flags:+$mu_common_flags }-Wpointer-arith" mu_common_flags="${mu_common_flags:+$mu_common_flags }-Wcast-qual" mu_common_flags="${mu_common_flags:+$mu_common_flags }-Wcast-align" mu_common_flags="${mu_common_flags:+$mu_common_flags }-Wformat=2" mu_common_flags="${mu_common_flags:+$mu_common_flags }-Wfloat-equal" mu_cflags="${mu_cflags:+$mu_cflags }-Wstrict-prototypes" mu_cflags="${mu_cflags:+$mu_cflags }-Wmissing-prototypes" mu_cxxflags="${mu_cxxflags:+$mu_cxxflags }-Woverloaded-virtual" fi if test "$mu_use_our_cflags" = yes; then : CFLAGS="${CFLAGS:+$CFLAGS }$mu_common_flags" CFLAGS="${CFLAGS:+$CFLAGS }$mu_cflags" fi if test "$mu_use_our_cxxflags" = yes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$mu_common_flags" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$mu_cxxflags" fi { mu_common_flags=; unset mu_common_flags;} { mu_cflags=; unset mu_cflags;} { mu_cxxflags=; unset mu_cxxflags;} ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu acm_save__ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }-Womg-wtf-not-an-option" ACM_C_WARNINGFAIL="" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC unknown warning options are errors" >&5 $as_echo_n "checking if $CC unknown warning options are errors... " >&6; } if ${mu_cv_C_flag_uwo+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : mu_cv_C_flag_uwo=no else mu_cv_C_flag_uwo=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag_uwo" >&5 $as_echo "$mu_cv_C_flag_uwo" >&6; } if test "$mu_cv_C_flag_uwo" = no; then : CFLAGS="${acm_save__ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION_CFLAGS:+$acm_save__ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION_CFLAGS }-Werror=unknown-warning-option -Womg-wtf-not-an-option" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Werror=unknown-warning-option" >&5 $as_echo_n "checking if $CC supports -Werror=unknown-warning-option... " >&6; } if ${mu_cv_C_flag_werror_uwo+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : mu_cv_C_flag_werror_uwo=no else mu_cv_C_flag_werror_uwo=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag_werror_uwo" >&5 $as_echo "$mu_cv_C_flag_werror_uwo" >&6; } if test "$mu_cv_C_flag_werror_uwo" = yes; then : ACM_C_WARNINGFAIL="-Werror=unknown-warning-option" else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Don't know how to make $CC fail with unknown warning options," >&5 $as_echo "$as_me: WARNING: Don't know how to make $CC fail with unknown warning options," >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: so later tests may (wrongly) decide to pass them to it anyway." >&5 $as_echo "$as_me: WARNING: so later tests may (wrongly) decide to pass them to it anyway." >&2;} fi fi CFLAGS=$acm_save__ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION_CFLAGS ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save__ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Womg-wtf-not-an-option" ACM_CXX_WARNINGFAIL="" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX unknown warning options are errors" >&5 $as_echo_n "checking if $CXX unknown warning options are errors... " >&6; } if ${mu_cv_CXX_flag_uwo+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag_uwo=no else mu_cv_CXX_flag_uwo=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag_uwo" >&5 $as_echo "$mu_cv_CXX_flag_uwo" >&6; } if test "$mu_cv_CXX_flag_uwo" = no; then : CXXFLAGS="${acm_save__ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION_CXXFLAGS:+$acm_save__ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION_CXXFLAGS }-Werror=unknown-warning-option -Womg-wtf-not-an-option" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=unknown-warning-option" >&5 $as_echo_n "checking if $CXX supports -Werror=unknown-warning-option... 
" >&6; } if ${mu_cv_CXX_flag_werror_uwo+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag_werror_uwo=no else mu_cv_CXX_flag_werror_uwo=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag_werror_uwo" >&5 $as_echo "$mu_cv_CXX_flag_werror_uwo" >&6; } if test "$mu_cv_CXX_flag_werror_uwo" = yes; then : ACM_CXX_WARNINGFAIL="-Werror=unknown-warning-option" else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Don't know how to make $CXX fail with unknown warning options," >&5 $as_echo "$as_me: WARNING: Don't know how to make $CXX fail with unknown warning options," >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: so later tests may (wrongly) decide to pass them to it anyway." >&5 $as_echo "$as_me: WARNING: so later tests may (wrongly) decide to pass them to it anyway." >&2;} fi fi CXXFLAGS=$acm_save__ACM_COMPILER_WERROR_UNKNOWN_WARNING_OPTION_CXXFLAGS ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test "$mu_cv_enable_clang_almost_everything" != no; then : CFLAGS="${CFLAGS:+$CFLAGS }-Weverything" CFLAGS="${CFLAGS:+$CFLAGS }-Wno-c99-extensions" CFLAGS="${CFLAGS:+$CFLAGS }-Wno-vla-extension" CFLAGS="${CFLAGS:+$CFLAGS }-Wno-vla" CFLAGS="${CFLAGS:+$CFLAGS }-Wno-gnu-zero-variadic-macro-arguments" CFLAGS="${CFLAGS:+$CFLAGS }-Wno-variadic-macros" CFLAGS="${CFLAGS:+$CFLAGS }-Wno-disabled-macro-expansion" CFLAGS="${CFLAGS:+$CFLAGS }-Wno-undef" CFLAGS="${CFLAGS:+$CFLAGS }-Wno-padded" CFLAGS="${CFLAGS:+$CFLAGS }-Wno-packed" CFLAGS="${CFLAGS:+$CFLAGS }-Wno-documentation-html" CFLAGS="${CFLAGS:+$CFLAGS }-Wno-documentation-unknown-command" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Weverything" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-c99-extensions" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-vla-extension" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-vla" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-gnu-zero-variadic-macro-arguments" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-variadic-macros" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-disabled-macro-expansion" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-undef" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-padded" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-packed" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-documentation-html" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-documentation-unknown-command" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-c++11-long-long" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-exit-time-destructors" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-global-constructors" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-weak-vtables" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-weak-template-vtables" CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-shadow" ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }$ACM_C_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }-Wno-reserved-id-macro" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Wno-reserved-id-macro" >&5 $as_echo_n "checking if $CC supports -Wno-reserved-id-macro... 
" >&6; } if ${mu_cv_C_flag__Wno_reserved_id_macro+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : mu_cv_C_flag__Wno_reserved_id_macro=yes else mu_cv_C_flag__Wno_reserved_id_macro=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag__Wno_reserved_id_macro" >&5 $as_echo "$mu_cv_C_flag__Wno_reserved_id_macro" >&6; } if test "x$mu_cv_C_flag__Wno_reserved_id_macro" = xyes; then : else CFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS fi if test "x$mu_cv_C_flag__Wno_reserved_id_macro" = xyes; then : CFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS }-Wno-reserved-id-macro" else CFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }$ACM_C_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }-Wno-format-pedantic" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Wno-format-pedantic" >&5 $as_echo_n "checking if $CC supports -Wno-format-pedantic... " >&6; } if ${mu_cv_C_flag__Wno_format_pedantic+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : mu_cv_C_flag__Wno_format_pedantic=yes else mu_cv_C_flag__Wno_format_pedantic=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag__Wno_format_pedantic" >&5 $as_echo "$mu_cv_C_flag__Wno_format_pedantic" >&6; } if test "x$mu_cv_C_flag__Wno_format_pedantic" = xyes; then : else CFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS fi if test "x$mu_cv_C_flag__Wno_format_pedantic" = xyes; then : CFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS }-Wno-format-pedantic" else CFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }$ACM_C_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }-Wno-double-promotion" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Wno-double-promotion" >&5 $as_echo_n "checking if $CC supports -Wno-double-promotion... " >&6; } if ${mu_cv_C_flag__Wno_double_promotion+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : mu_cv_C_flag__Wno_double_promotion=yes else mu_cv_C_flag__Wno_double_promotion=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag__Wno_double_promotion" >&5 $as_echo "$mu_cv_C_flag__Wno_double_promotion" >&6; } if test "x$mu_cv_C_flag__Wno_double_promotion" = xyes; then : else CFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS fi if test "x$mu_cv_C_flag__Wno_double_promotion" = xyes; then : CFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS }-Wno-double-promotion" else CFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-reserved-id-macro" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wno-reserved-id-macro" >&5 $as_echo_n "checking if $CXX supports -Wno-reserved-id-macro... " >&6; } if ${mu_cv_CXX_flag__Wno_reserved_id_macro+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Wno_reserved_id_macro=yes else mu_cv_CXX_flag__Wno_reserved_id_macro=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Wno_reserved_id_macro" >&5 $as_echo "$mu_cv_CXX_flag__Wno_reserved_id_macro" >&6; } if test "x$mu_cv_CXX_flag__Wno_reserved_id_macro" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Wno_reserved_id_macro" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Wno-reserved-id-macro" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-format-pedantic" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wno-format-pedantic" >&5 $as_echo_n "checking if $CXX supports -Wno-format-pedantic... " >&6; } if ${mu_cv_CXX_flag__Wno_format_pedantic+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Wno_format_pedantic=yes else mu_cv_CXX_flag__Wno_format_pedantic=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Wno_format_pedantic" >&5 $as_echo "$mu_cv_CXX_flag__Wno_format_pedantic" >&6; } if test "x$mu_cv_CXX_flag__Wno_format_pedantic" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Wno_format_pedantic" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Wno-format-pedantic" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-double-promotion" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wno-double-promotion" >&5 $as_echo_n "checking if $CXX supports -Wno-double-promotion... " >&6; } if ${mu_cv_CXX_flag__Wno_double_promotion+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Wno_double_promotion=yes else mu_cv_CXX_flag__Wno_double_promotion=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Wno_double_promotion" >&5 $as_echo "$mu_cv_CXX_flag__Wno_double_promotion" >&6; } if test "x$mu_cv_CXX_flag__Wno_double_promotion" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Wno_double_promotion" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Wno-double-promotion" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-shadow-field-in-constructor" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wno-shadow-field-in-constructor" >&5 $as_echo_n "checking if $CXX supports -Wno-shadow-field-in-constructor... " >&6; } if ${mu_cv_CXX_flag__Wno_shadow_field_in_constructor+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Wno_shadow_field_in_constructor=yes else mu_cv_CXX_flag__Wno_shadow_field_in_constructor=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Wno_shadow_field_in_constructor" >&5 $as_echo "$mu_cv_CXX_flag__Wno_shadow_field_in_constructor" >&6; } if test "x$mu_cv_CXX_flag__Wno_shadow_field_in_constructor" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Wno_shadow_field_in_constructor" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Wno-shadow-field-in-constructor" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu case $CXX_STANDARD in #( *++98|*++03) : ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-suggest-destructor-override" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wno-suggest-destructor-override" >&5 $as_echo_n "checking if $CXX supports -Wno-suggest-destructor-override... " >&6; } if ${mu_cv_CXX_flag__Wno_suggest_destructor_override+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Wno_suggest_destructor_override=yes else mu_cv_CXX_flag__Wno_suggest_destructor_override=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Wno_suggest_destructor_override" >&5 $as_echo "$mu_cv_CXX_flag__Wno_suggest_destructor_override" >&6; } if test "x$mu_cv_CXX_flag__Wno_suggest_destructor_override" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Wno_suggest_destructor_override" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Wno-suggest-destructor-override" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wno-suggest-override" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wno-suggest-override" >&5 $as_echo_n "checking if $CXX supports -Wno-suggest-override... " >&6; } if ${mu_cv_CXX_flag__Wno_suggest_override+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Wno_suggest_override=yes else mu_cv_CXX_flag__Wno_suggest_override=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Wno_suggest_override" >&5 $as_echo "$mu_cv_CXX_flag__Wno_suggest_override" >&6; } if test "x$mu_cv_CXX_flag__Wno_suggest_override" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Wno_suggest_override" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Wno-suggest-override" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ;; #( *) : ;; esac fi if test "$mu_cv_enable_code_suggestions" = yes; then : ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }$ACM_C_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }-Wsuggest-attribute=format" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Wsuggest-attribute=format" >&5 $as_echo_n "checking if $CC supports -Wsuggest-attribute=format... " >&6; } if ${mu_cv_C_flag__Wsuggest_attribute_format+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : mu_cv_C_flag__Wsuggest_attribute_format=yes else mu_cv_C_flag__Wsuggest_attribute_format=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag__Wsuggest_attribute_format" >&5 $as_echo "$mu_cv_C_flag__Wsuggest_attribute_format" >&6; } if test "x$mu_cv_C_flag__Wsuggest_attribute_format" = xyes; then : else CFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS fi if test "x$mu_cv_C_flag__Wsuggest_attribute_format" = xyes; then : CFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS }-Wsuggest-attribute=format" else CFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }$ACM_C_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }-Wsuggest-attribute=const" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Wsuggest-attribute=const" >&5 $as_echo_n "checking if $CC supports -Wsuggest-attribute=const... " >&6; } if ${mu_cv_C_flag__Wsuggest_attribute_const+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : mu_cv_C_flag__Wsuggest_attribute_const=yes else mu_cv_C_flag__Wsuggest_attribute_const=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag__Wsuggest_attribute_const" >&5 $as_echo "$mu_cv_C_flag__Wsuggest_attribute_const" >&6; } if test "x$mu_cv_C_flag__Wsuggest_attribute_const" = xyes; then : else CFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS fi if test "x$mu_cv_C_flag__Wsuggest_attribute_const" = xyes; then : CFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS }-Wsuggest-attribute=const" else CFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }$ACM_C_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }-Wsuggest-attribute=pure" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Wsuggest-attribute=pure" >&5 $as_echo_n "checking if $CC supports -Wsuggest-attribute=pure... " >&6; } if ${mu_cv_C_flag__Wsuggest_attribute_pure+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : mu_cv_C_flag__Wsuggest_attribute_pure=yes else mu_cv_C_flag__Wsuggest_attribute_pure=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag__Wsuggest_attribute_pure" >&5 $as_echo "$mu_cv_C_flag__Wsuggest_attribute_pure" >&6; } if test "x$mu_cv_C_flag__Wsuggest_attribute_pure" = xyes; then : else CFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS fi if test "x$mu_cv_C_flag__Wsuggest_attribute_pure" = xyes; then : CFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS }-Wsuggest-attribute=pure" else CFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }$ACM_C_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }-Wsuggest-attribute=noreturn" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Wsuggest-attribute=noreturn" >&5 $as_echo_n "checking if $CC supports -Wsuggest-attribute=noreturn... " >&6; } if ${mu_cv_C_flag__Wsuggest_attribute_noreturn+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : mu_cv_C_flag__Wsuggest_attribute_noreturn=yes else mu_cv_C_flag__Wsuggest_attribute_noreturn=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag__Wsuggest_attribute_noreturn" >&5 $as_echo "$mu_cv_C_flag__Wsuggest_attribute_noreturn" >&6; } if test "x$mu_cv_C_flag__Wsuggest_attribute_noreturn" = xyes; then : else CFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS fi if test "x$mu_cv_C_flag__Wsuggest_attribute_noreturn" = xyes; then : CFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS }-Wsuggest-attribute=noreturn" else CFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }$ACM_C_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }-Wsuggest-attribute=malloc" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Wsuggest-attribute=malloc" >&5 $as_echo_n "checking if $CC supports -Wsuggest-attribute=malloc... " >&6; } if ${mu_cv_C_flag__Wsuggest_attribute_malloc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : mu_cv_C_flag__Wsuggest_attribute_malloc=yes else mu_cv_C_flag__Wsuggest_attribute_malloc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag__Wsuggest_attribute_malloc" >&5 $as_echo "$mu_cv_C_flag__Wsuggest_attribute_malloc" >&6; } if test "x$mu_cv_C_flag__Wsuggest_attribute_malloc" = xyes; then : else CFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS fi if test "x$mu_cv_C_flag__Wsuggest_attribute_malloc" = xyes; then : CFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS }-Wsuggest-attribute=malloc" else CFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }$ACM_C_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }-Wsuggest-attribute=cold" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports -Wsuggest-attribute=cold" >&5 $as_echo_n "checking if $CC supports -Wsuggest-attribute=cold... " >&6; } if ${mu_cv_C_flag__Wsuggest_attribute_cold+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : mu_cv_C_flag__Wsuggest_attribute_cold=yes else mu_cv_C_flag__Wsuggest_attribute_cold=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_C_flag__Wsuggest_attribute_cold" >&5 $as_echo "$mu_cv_C_flag__Wsuggest_attribute_cold" >&6; } if test "x$mu_cv_C_flag__Wsuggest_attribute_cold" = xyes; then : else CFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS fi if test "x$mu_cv_C_flag__Wsuggest_attribute_cold" = xyes; then : CFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS }-Wsuggest-attribute=cold" else CFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CFLAGS fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wsuggest-attribute=format" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wsuggest-attribute=format" >&5 $as_echo_n "checking if $CXX supports -Wsuggest-attribute=format... " >&6; } if ${mu_cv_CXX_flag__Wsuggest_attribute_format+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Wsuggest_attribute_format=yes else mu_cv_CXX_flag__Wsuggest_attribute_format=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Wsuggest_attribute_format" >&5 $as_echo "$mu_cv_CXX_flag__Wsuggest_attribute_format" >&6; } if test "x$mu_cv_CXX_flag__Wsuggest_attribute_format" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Wsuggest_attribute_format" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Wsuggest-attribute=format" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wsuggest-attribute=const" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wsuggest-attribute=const" >&5 $as_echo_n "checking if $CXX supports -Wsuggest-attribute=const... " >&6; } if ${mu_cv_CXX_flag__Wsuggest_attribute_const+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Wsuggest_attribute_const=yes else mu_cv_CXX_flag__Wsuggest_attribute_const=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Wsuggest_attribute_const" >&5 $as_echo "$mu_cv_CXX_flag__Wsuggest_attribute_const" >&6; } if test "x$mu_cv_CXX_flag__Wsuggest_attribute_const" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Wsuggest_attribute_const" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Wsuggest-attribute=const" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wsuggest-attribute=pure" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wsuggest-attribute=pure" >&5 $as_echo_n "checking if $CXX supports -Wsuggest-attribute=pure... " >&6; } if ${mu_cv_CXX_flag__Wsuggest_attribute_pure+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Wsuggest_attribute_pure=yes else mu_cv_CXX_flag__Wsuggest_attribute_pure=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Wsuggest_attribute_pure" >&5 $as_echo "$mu_cv_CXX_flag__Wsuggest_attribute_pure" >&6; } if test "x$mu_cv_CXX_flag__Wsuggest_attribute_pure" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Wsuggest_attribute_pure" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Wsuggest-attribute=pure" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wsuggest-attribute=noreturn" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wsuggest-attribute=noreturn" >&5 $as_echo_n "checking if $CXX supports -Wsuggest-attribute=noreturn... " >&6; } if ${mu_cv_CXX_flag__Wsuggest_attribute_noreturn+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Wsuggest_attribute_noreturn=yes else mu_cv_CXX_flag__Wsuggest_attribute_noreturn=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Wsuggest_attribute_noreturn" >&5 $as_echo "$mu_cv_CXX_flag__Wsuggest_attribute_noreturn" >&6; } if test "x$mu_cv_CXX_flag__Wsuggest_attribute_noreturn" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Wsuggest_attribute_noreturn" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Wsuggest-attribute=noreturn" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wsuggest-attribute=malloc" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wsuggest-attribute=malloc" >&5 $as_echo_n "checking if $CXX supports -Wsuggest-attribute=malloc... " >&6; } if ${mu_cv_CXX_flag__Wsuggest_attribute_malloc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Wsuggest_attribute_malloc=yes else mu_cv_CXX_flag__Wsuggest_attribute_malloc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Wsuggest_attribute_malloc" >&5 $as_echo "$mu_cv_CXX_flag__Wsuggest_attribute_malloc" >&6; } if test "x$mu_cv_CXX_flag__Wsuggest_attribute_malloc" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Wsuggest_attribute_malloc" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Wsuggest-attribute=malloc" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Wsuggest-attribute=cold" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Wsuggest-attribute=cold" >&5 $as_echo_n "checking if $CXX supports -Wsuggest-attribute=cold... " >&6; } if ${mu_cv_CXX_flag__Wsuggest_attribute_cold+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Wsuggest_attribute_cold=yes else mu_cv_CXX_flag__Wsuggest_attribute_cold=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Wsuggest_attribute_cold" >&5 $as_echo "$mu_cv_CXX_flag__Wsuggest_attribute_cold" >&6; } if test "x$mu_cv_CXX_flag__Wsuggest_attribute_cold" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Wsuggest_attribute_cold" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Wsuggest-attribute=cold" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu fi if test "$mu_cv_enable_fortify_source" != no; then : cat >>confdefs.h <<_ACEOF #define _FORTIFY_SOURCE $mu_cv_enable_fortify_source _ACEOF fi case $mu_cv_enable_stack_protector in #( no) : ;; #( ''|*[!0-9]*) : acm_save_ACM_FOREACH_1_IFS=$IFS IFS=, acm_compile_link_opt_list="-fstack-protector-$mu_cv_enable_stack_protector" for compile_link_opt in $acm_compile_link_opt_list; do IFS=$acm_save_ACM_FOREACH_1_IFS compile_link_opt="${compile_link_opt#"${compile_link_opt%%[![:space:]]*}"}" compile_link_opt="${compile_link_opt%"${compile_link_opt##*[![:space:]]}"}" acm_save_ACM_ADD_COMPILE_LINK_OPTION_CFLAGS=$CFLAGS acm_save_ACM_ADD_COMPILE_LINK_OPTION_CXXFLAGS=$CXXFLAGS ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu acm_save_ACM_FOREACH_2_IFS=$IFS IFS=, acm_compiler_opt_list="$compile_link_opt" for compiler_opt in $acm_compiler_opt_list; do IFS=$acm_save_ACM_FOREACH_2_IFS compiler_opt="${compiler_opt#"${compiler_opt%%[![:space:]]*}"}" compiler_opt="${compiler_opt%"${compiler_opt##*[![:space:]]}"}" acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }$compiler_opt" as_cachevar=`$as_echo "mu_cv_C_flag_$compiler_opt" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports $compiler_opt" >&5 $as_echo_n "checking if $CC supports $compiler_opt... " >&6; } if eval \${$as_cachevar+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
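   The same empty-program probe is reused for the stack protector option
   (-fstack-protector-strong, -fstack-protector-all, etc., depending on the
   value of mu_cv_enable_stack_protector).  The option is checked for the C
   compiler first, then for the C++ compiler, and finally put through a link
   test; if the linker rejects it, both CFLAGS and CXXFLAGS are rolled back
   to their saved values.  The cache variable name is derived from the
   option text with as_tr_sh, which folds punctuation to underscores.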
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$as_cachevar=yes" else eval "$as_cachevar=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_cachevar"\" = x"yes"; then : else CFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS fi done IFS=$acm_save_ACM_FOREACH_2_IFS ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save_ACM_FOREACH_2_IFS=$IFS IFS=, acm_compiler_opt_list="$compile_link_opt" for compiler_opt in $acm_compiler_opt_list; do IFS=$acm_save_ACM_FOREACH_2_IFS compiler_opt="${compiler_opt#"${compiler_opt%%[![:space:]]*}"}" compiler_opt="${compiler_opt%"${compiler_opt##*[![:space:]]}"}" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$compiler_opt" as_cachevar=`$as_echo "mu_cv_CXX_flag_$compiler_opt" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports $compiler_opt" >&5 $as_echo_n "checking if $CXX supports $compiler_opt... " >&6; } if eval \${$as_cachevar+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : eval "$as_cachevar=yes" else eval "$as_cachevar=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_cachevar"\" = x"yes"; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi done IFS=$acm_save_ACM_FOREACH_2_IFS ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu as_compvar=`$as_echo "mu_cv_C_flag_$compile_link_opt" | $as_tr_sh` if eval test \"x\$"$as_compvar"\" = x"yes"; then : ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu acm_save__ACM_ADD_LINKER_OPTION_LDFLAGS=$LDFLAGS LDFLAGS="${LDFLAGS:+$LDFLAGS }$compile_link_opt" as_cachevar=`$as_echo "mu_cv_ldflag_$compile_link_opt" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking if linker supports $compile_link_opt" >&5 $as_echo_n "checking if linker supports $compile_link_opt... " >&6; } if eval \${$as_cachevar+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_cachevar=yes" else eval "$as_cachevar=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$as_cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_cachevar"\" = x"yes"; then : else LDFLAGS=$acm_save__ACM_ADD_LINKER_OPTION_LDFLAGS fi as_linkvar=`$as_echo "mu_cv_ldflag_$compile_link_opt" | $as_tr_sh` if eval test \"x\$"$as_linkvar"\" = x"no"; then : CFLAGS=$acm_save_ACM_ADD_COMPILE_LINK_OPTION_CFLAGS CXXFLAGS=$acm_save_ACM_ADD_COMPILE_LINK_OPTION_CXXFLAGS fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu else CFLAGS=$acm_save_ACM_ADD_COMPILE_LINK_OPTION_CFLAGS CXXFLAGS=$acm_save_ACM_ADD_COMPILE_LINK_OPTION_CXXFLAGS fi done IFS=$acm_save_ACM_FOREACH_1_IFS as_spvar=`$as_echo "mu_cv_ldflag_-fstack-protector-$mu_cv_enable_stack_protector" | $as_tr_sh` if eval test \"x\$"$as_spvar"\" = x"yes"; then : else mu_cv_enable_stack_protector=4 fi ;; #( *) : ;; esac case $mu_cv_enable_stack_protector in #( ''|*[!0-9]*) : ;; #( *) : acm_save_ACM_FOREACH_1_IFS=$IFS IFS=, acm_compile_link_opt_list="-fstack-protector --param ssp-buffer-size=$mu_cv_enable_stack_protector" for compile_link_opt in $acm_compile_link_opt_list; do IFS=$acm_save_ACM_FOREACH_1_IFS compile_link_opt="${compile_link_opt#"${compile_link_opt%%[![:space:]]*}"}" compile_link_opt="${compile_link_opt%"${compile_link_opt##*[![:space:]]}"}" acm_save_ACM_ADD_COMPILE_LINK_OPTION_CFLAGS=$CFLAGS acm_save_ACM_ADD_COMPILE_LINK_OPTION_CXXFLAGS=$CXXFLAGS ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu acm_save_ACM_FOREACH_2_IFS=$IFS IFS=, acm_compiler_opt_list="$compile_link_opt" for compiler_opt in $acm_compiler_opt_list; do IFS=$acm_save_ACM_FOREACH_2_IFS compiler_opt="${compiler_opt#"${compiler_opt%%[![:space:]]*}"}" compiler_opt="${compiler_opt%"${compiler_opt##*[![:space:]]}"}" acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }$compiler_opt" as_cachevar=`$as_echo "mu_cv_C_flag_$compiler_opt" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports $compiler_opt" >&5 $as_echo_n "checking if $CC supports $compiler_opt... " >&6; } if eval \${$as_cachevar+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$as_cachevar=yes" else eval "$as_cachevar=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_cachevar"\" = x"yes"; then : else CFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS fi done IFS=$acm_save_ACM_FOREACH_2_IFS ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save_ACM_FOREACH_2_IFS=$IFS IFS=, acm_compiler_opt_list="$compile_link_opt" for compiler_opt in $acm_compiler_opt_list; do IFS=$acm_save_ACM_FOREACH_2_IFS compiler_opt="${compiler_opt#"${compiler_opt%%[![:space:]]*}"}" compiler_opt="${compiler_opt%"${compiler_opt##*[![:space:]]}"}" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$compiler_opt" as_cachevar=`$as_echo "mu_cv_CXX_flag_$compiler_opt" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports $compiler_opt" >&5 $as_echo_n "checking if $CXX supports $compiler_opt... " >&6; } if eval \${$as_cachevar+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : eval "$as_cachevar=yes" else eval "$as_cachevar=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_cachevar"\" = x"yes"; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi done IFS=$acm_save_ACM_FOREACH_2_IFS ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu as_compvar=`$as_echo "mu_cv_C_flag_$compile_link_opt" | $as_tr_sh` if eval test \"x\$"$as_compvar"\" = x"yes"; then : ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu acm_save__ACM_ADD_LINKER_OPTION_LDFLAGS=$LDFLAGS LDFLAGS="${LDFLAGS:+$LDFLAGS }$compile_link_opt" as_cachevar=`$as_echo "mu_cv_ldflag_$compile_link_opt" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking if linker supports $compile_link_opt" >&5 $as_echo_n "checking if linker supports $compile_link_opt... " >&6; } if eval \${$as_cachevar+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_cachevar=yes" else eval "$as_cachevar=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$as_cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_cachevar"\" = x"yes"; then : else LDFLAGS=$acm_save__ACM_ADD_LINKER_OPTION_LDFLAGS fi as_linkvar=`$as_echo "mu_cv_ldflag_$compile_link_opt" | $as_tr_sh` if eval test \"x\$"$as_linkvar"\" = x"no"; then : CFLAGS=$acm_save_ACM_ADD_COMPILE_LINK_OPTION_CFLAGS CXXFLAGS=$acm_save_ACM_ADD_COMPILE_LINK_OPTION_CXXFLAGS fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu else CFLAGS=$acm_save_ACM_ADD_COMPILE_LINK_OPTION_CFLAGS CXXFLAGS=$acm_save_ACM_ADD_COMPILE_LINK_OPTION_CXXFLAGS fi done IFS=$acm_save_ACM_FOREACH_1_IFS ;; #( *) : ;; esac if test "$mu_cv_enable_relro" = yes; then : acm_save__ACM_ADD_LINKER_OPTION_LDFLAGS=$LDFLAGS LDFLAGS="${LDFLAGS:+$LDFLAGS }-Wl,-z,relro" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if linker supports -Wl,-z,relro" >&5 $as_echo_n "checking if linker supports -Wl,-z,relro... " >&6; } if ${mu_cv_ldflag__Wl__z_relro+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : mu_cv_ldflag__Wl__z_relro=yes else mu_cv_ldflag__Wl__z_relro=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_ldflag__Wl__z_relro" >&5 $as_echo "$mu_cv_ldflag__Wl__z_relro" >&6; } if test "x$mu_cv_ldflag__Wl__z_relro" = xyes; then : else LDFLAGS=$acm_save__ACM_ADD_LINKER_OPTION_LDFLAGS fi fi if test "$mu_cv_enable_bind_now" = yes; then : acm_save__ACM_ADD_LINKER_OPTION_LDFLAGS=$LDFLAGS LDFLAGS="${LDFLAGS:+$LDFLAGS }-Wl,-z,now" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if linker supports -Wl,-z,now" >&5 $as_echo_n "checking if linker supports -Wl,-z,now... " >&6; } if ${mu_cv_ldflag__Wl__z_now+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
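   Same pattern for the link-time hardening flags: -Wl,-z,relro was probed
   just above and -Wl,-z,now is probed here; when both options are enabled
   and accepted, the resulting binaries get full RELRO (the GOT is made
   read-only after relocations are resolved at startup).  A failed link test
   simply restores the saved LDFLAGS.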
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : mu_cv_ldflag__Wl__z_now=yes else mu_cv_ldflag__Wl__z_now=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_ldflag__Wl__z_now" >&5 $as_echo "$mu_cv_ldflag__Wl__z_now" >&6; } if test "x$mu_cv_ldflag__Wl__z_now" = xyes; then : else LDFLAGS=$acm_save__ACM_ADD_LINKER_OPTION_LDFLAGS fi fi if test "$mu_cv_enable_san" != no; then : acm_save_ACM_FOREACH_1_IFS=$IFS IFS=, acm_san_type_list="$mu_cv_enable_san" for san_type in $acm_san_type_list; do IFS=$acm_save_ACM_FOREACH_1_IFS san_type="${san_type#"${san_type%%[![:space:]]*}"}" san_type="${san_type%"${san_type##*[![:space:]]}"}" ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu acm_save_ACM_FOREACH_2_IFS=$IFS IFS=, acm_compiler_opt_list="-fsanitize=$san_type" for compiler_opt in $acm_compiler_opt_list; do IFS=$acm_save_ACM_FOREACH_2_IFS compiler_opt="${compiler_opt#"${compiler_opt%%[![:space:]]*}"}" compiler_opt="${compiler_opt%"${compiler_opt##*[![:space:]]}"}" acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS=$CFLAGS CFLAGS="${CFLAGS:+$CFLAGS }$compiler_opt" as_cachevar=`$as_echo "mu_cv_C_flag_$compiler_opt" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports $compiler_opt" >&5 $as_echo_n "checking if $CC supports $compiler_opt... " >&6; } if eval \${$as_cachevar+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$as_cachevar=yes" else eval "$as_cachevar=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_cachevar"\" = x"yes"; then : else CFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CFLAGS fi done IFS=$acm_save_ACM_FOREACH_2_IFS ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save_ACM_FOREACH_2_IFS=$IFS IFS=, acm_compiler_opt_list="-fsanitize=$san_type" for compiler_opt in $acm_compiler_opt_list; do IFS=$acm_save_ACM_FOREACH_2_IFS compiler_opt="${compiler_opt#"${compiler_opt%%[![:space:]]*}"}" compiler_opt="${compiler_opt%"${compiler_opt##*[![:space:]]}"}" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$compiler_opt" as_cachevar=`$as_echo "mu_cv_CXX_flag_$compiler_opt" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports $compiler_opt" >&5 $as_echo_n "checking if $CXX supports $compiler_opt... " >&6; } if eval \${$as_cachevar+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
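   Sanitizer handling: each comma-separated value from mu_cv_enable_san is
   probed as -fsanitize=TYPE for both compilers, and if the C compiler
   accepts it the same option is appended to LDFLAGS as well.  For the
   address, memory and undefined sanitizers -fno-omit-frame-pointer is also
   added (unless already present) so the sanitizer runtime can produce
   usable stack traces.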
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : eval "$as_cachevar=yes" else eval "$as_cachevar=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_cachevar"\" = x"yes"; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi done IFS=$acm_save_ACM_FOREACH_2_IFS ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu as_cachevar=`$as_echo "mu_cv_C_flag_-fsanitize=$san_type" | $as_tr_sh` if eval test \"x\$"$as_cachevar"\" = x"yes"; then : LDFLAGS="${LDFLAGS:+$LDFLAGS }-fsanitize=$san_type" case $san_type in #( address|memory|undefined) : case " ${CFLAGS} " in #( *" -fno-omit-frame-pointer "*) : ;; #( *) : CFLAGS="${CFLAGS:+$CFLAGS }-fno-omit-frame-pointer" ;; esac case " ${CXXFLAGS} " in #( *" -fno-omit-frame-pointer "*) : ;; #( *) : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-fno-omit-frame-pointer" ;; esac ;; #( *) : ;; esac fi done IFS=$acm_save_ACM_FOREACH_1_IFS fi case $host in *-*-freebsd* ) ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save__0_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-fno-guess-branch-probability" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX needs -fno-guess-branch-probability" >&5 $as_echo_n "checking if $CXX needs -fno-guess-branch-probability... " >&6; } if ${mu_cv_flag_guess_branch_probability+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifdef __clang__ using the clang compiler #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_flag_guess_branch_probability=yes else mu_cv_flag_guess_branch_probability=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_flag_guess_branch_probability" >&5 $as_echo "$mu_cv_flag_guess_branch_probability" >&6; } if test "$mu_cv_flag_guess_branch_probability" = yes; then : else CXXFLAGS=$acm_save__0_CXXFLAGS fi acm_save__0_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-fno-reorder-blocks" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX needs -fno-reorder-blocks" >&5 $as_echo_n "checking if $CXX needs -fno-reorder-blocks... " >&6; } if ${mu_cv_flag_reorder_blocks+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
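   Unlike the probes above, these FreeBSD-only checks ask whether the flag
   is needed rather than merely accepted: the test program below contains
   deliberately invalid code guarded by an ifdef for the clang compiler, so
   it only compiles with a non-clang compiler (in practice GCC).  The
   -fno-guess-branch-probability, -fno-reorder-blocks and
   -fno-tree-dominator-opts options are therefore kept only when building
   with GCC on FreeBSD and dropped again for clang.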
*/ int main () { #ifdef __clang__ using the clang compiler #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_flag_reorder_blocks=yes else mu_cv_flag_reorder_blocks=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_flag_reorder_blocks" >&5 $as_echo "$mu_cv_flag_reorder_blocks" >&6; } if test "$mu_cv_flag_reorder_blocks" = yes; then : else CXXFLAGS=$acm_save__0_CXXFLAGS fi acm_save__0_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-fno-tree-dominator-opts" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX needs -fno-tree-dominator-opts" >&5 $as_echo_n "checking if $CXX needs -fno-tree-dominator-opts... " >&6; } if ${mu_cv_flag_tree_dominator_opts+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifdef __clang__ using the clang compiler #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_flag_tree_dominator_opts=yes else mu_cv_flag_tree_dominator_opts=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_flag_tree_dominator_opts" >&5 $as_echo "$mu_cv_flag_tree_dominator_opts" >&6; } if test "$mu_cv_flag_tree_dominator_opts" = yes; then : else CXXFLAGS=$acm_save__0_CXXFLAGS fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ;; esac PTHREAD_CPPFLAGS="-pthread" PTHREAD_LDFLAGS="-pthread" acm_save__0_CPPFLAGS=$CPPFLAGS CPPFLAGS="${CPPFLAGS:+$CPPFLAGS }$PTHREAD_CPPFLAGS" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if _REENTRANT is defined by the compiler" >&5 $as_echo_n "checking if _REENTRANT is defined by the compiler... " >&6; } if ${mu_cv_have_reentrant+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifndef _REENTRANT #error "_REENTRANT was not defined" #endif int main () { ; return 0; } _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : mu_cv_have_reentrant=yes else mu_cv_have_reentrant=no fi rm -f conftest.err conftest.i conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_have_reentrant" >&5 $as_echo "$mu_cv_have_reentrant" >&6; } CPPFLAGS=$acm_save__0_CPPFLAGS if test "$mu_cv_have_reentrant" != yes; then : PTHREAD_CPPFLAGS="${PTHREAD_CPPFLAGS:+$PTHREAD_CPPFLAGS }-D_REENTRANT" fi ARFLAGS=rDvs if test "$mu_cv_enable_bison_deprecated_warnings" = no; then : if test "$YACC" != ":"; then : if $YACC -Wno-deprecated -V > /dev/null 2>&1; then : mu_yacc_flags=" -Wno-deprecated" { $as_echo "$as_me:${as_lineno-$LINENO}: disabled bison3 deprecation warnings" >&5 $as_echo "$as_me: disabled bison3 deprecation warnings" >&6;} fi fi fi YACCFLAGS="-d$mu_yacc_flags \$(EXTRAYACCFLAGS)" LEXFLAGS="\$(EXTRALEXFLAGS)" { mu_yacc_flags=; unset mu_yacc_flags;} cat >>confdefs.h <<_ACEOF #define EMDEBUG $(test "$mu_cv_enable_debug" != yes)$? _ACEOF cat >>confdefs.h <<_ACEOF #define EM_USE_VALGRIND_FRIENDLY $(test "$mu_cv_enable_valgrind_friendly" != yes)$? _ACEOF cat >>confdefs.h <<_ACEOF #define EM_SYSTEM_RUNDIR "$SYSTEM_RUNDIR" _ACEOF # Check whether --enable-wide_strings was given. 
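# The EM_USE_WIDE_STRINGS value below (like EMDEBUG and
# EM_USE_VALGRIND_FRIENDLY above) is produced with the "$(test ... != yes)$?"
# idiom: the command substitution expands to nothing and the trailing $? is
# the exit status of that test, so the macro ends up defined to 0 when the
# option is disabled and to 1 when it is enabled.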
if test "${enable_wide_strings+set}" = set; then : enableval=$enable_wide_strings; mu_cv_enable_wide_strings=$enableval else mu_cv_enable_wide_strings=no fi cat >>confdefs.h <<_ACEOF #define EM_USE_WIDE_STRINGS $(test "$mu_cv_enable_wide_strings" != yes)$? _ACEOF if test "$mu_cv_enable_wide_strings" = yes; then : makeup_build_flavour="${makeup_build_flavour}w" case $host in *-*-cygwin* | *-*-mingw32* ) $as_echo "#define UNICODE 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: using UNICODE" >&5 $as_echo "$as_me: using UNICODE" >&6;} ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if ${ac_cv_path_GREP+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_GREP" || continue # Check for GNU ac_path_GREP and select it if it is found. # Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } if ${ac_cv_path_EGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_EGREP" || continue # Check for GNU ac_path_EGREP and select it if it is found. 
# Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include #include int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? 
((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in xlocale.h do : ac_fn_c_check_header_mongrel "$LINENO" "xlocale.h" "ac_cv_header_xlocale_h" "$ac_includes_default" if test "x$ac_cv_header_xlocale_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_XLOCALE_H 1 _ACEOF fi done for ac_func in newlocale do : ac_fn_c_check_func "$LINENO" "newlocale" "ac_cv_func_newlocale" if test "x$ac_cv_func_newlocale" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_NEWLOCALE 1 _ACEOF fi done if test "$ac_cv_func_newlocale" = yes; then : else acm_save__0_LIBS=$LIBS case " ${LIBS} " in #( *" -lmsvcr110 "*) : ;; #( *) : LIBS="-lmsvcr110${LIBS:+ $LIBS}" ;; esac for ac_func in _create_locale do : ac_fn_c_check_func "$LINENO" "_create_locale" "ac_cv_func__create_locale" if test "x$ac_cv_func__create_locale" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE__CREATE_LOCALE 1 _ACEOF fi done LIBS=$acm_save__0_LIBS fi for ac_func in strtod_l _strtod_l do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF break fi done if test "$ac_cv_func_strtod_l" = yes; then : elif test "$ac_cv_func__strtod_l" = yes; then : else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: No localisable strtod available on this system." >&5 $as_echo "$as_me: WARNING: No localisable strtod available on this system." >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Binaries will need to be run in the \"C\" locale." >&5 $as_echo "$as_me: WARNING: Binaries will need to be run in the \"C\" locale." >&2;} fi # Check whether --with-iconv was given. if test "${with_iconv+set}" = set; then : withval=$with_iconv; mu_cv_with_iconv=$withval else mu_cv_with_iconv=yes fi # Check whether --with-gettext was given. if test "${with_gettext+set}" = set; then : withval=$with_gettext; mu_cv_with_gettext=$withval else mu_cv_with_gettext=yes fi if test -n "${ac_aux_dir#$srcdir/}"; then _filedest=$srcdir/${ac_aux_dir#$srcdir/} mkdir -p $_filedest else _filedest=$srcdir fi if test -n "/usr/share/gettext"; then _filesources="/usr/share/gettext" else _filesources="/usr/share/misc /usr/share/automake* /usr/share/libtool" fi if test ! -e "$_filedest/config.rpath" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $_filedest/config.rpath" >&5 $as_echo_n "checking for $_filedest/config.rpath... 
" >&6; } ( cd $_filedest for d in ".." "../.." ; do if test -r "$d/config.rpath" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: linking from $d/config.rpath." >&5 $as_echo "linking from $d/config.rpath." >&6; } $LN_S "$d/config.rpath" . break fi done ) if test ! -e "$_filedest/config.rpath" ; then for d in $_filesources; do if test -r "$d/config.rpath" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: copying from $d/config.rpath." >&5 $as_echo "copying from $d/config.rpath." >&6; } cp -a "$d/config.rpath" "$_filedest/config.rpath" break fi done fi if test ! -e "$_filedest/config.rpath" ; then as_fn_error $? "Failed to locate config.rpath. Stopping." "$LINENO" 5 fi fi mu_lsan_enabled=no if test "x$mu_cv_C_flag__fsanitize_address" = xyes; then : mu_lsan_enabled=yes fi if test "x$mu_cv_C_flag__fsanitize_leak" = xyes; then : mu_lsan_enabled=yes fi if test "$mu_lsan_enabled" = yes; then : if ${mu_suppress_lsan_iconv+:} false; then : as_fn_error $? "LSan suppression for 'iconv' is already active" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: Disabling LSan for iconv ..." >&5 $as_echo "$as_me: Disabling LSan for iconv ..." >&6;} if test -n "$ASAN_OPTIONS"; then : mu_suppress_lsan_iconv=$ASAN_OPTIONS export ASAN_OPTIONS="$ASAN_OPTIONS:detect_leaks=0" else mu_suppress_lsan_iconv=yes export ASAN_OPTIONS="detect_leaks=0" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 $as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } set x ${MAKE-make} ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : $as_echo_n "(cached) " >&6 else cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering ...", which would confuse us. case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } SET_MAKE= else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 $as_echo_n "checking for a thread-safe mkdir -p... " >&6; } if test -z "$MKDIR_P"; then if ${ac_cv_path_mkdir+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in mkdir gmkdir; do for ac_exec_ext in '' $ac_executable_extensions; do as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( 'mkdir (GNU coreutils) '* | \ 'mkdir (coreutils) '* | \ 'mkdir (fileutils) '4.1*) ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext break 3;; esac done done done IFS=$as_save_IFS fi test -d ./--version && rmdir ./--version if test "${ac_cv_path_mkdir+set}" = set; then MKDIR_P="$ac_cv_path_mkdir -p" else # As a last resort, use the slow shell script. Don't cache a # value for MKDIR_P within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. 
MKDIR_P="$ac_install_sh -d" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 $as_echo "$MKDIR_P" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 $as_echo_n "checking for a sed that does not truncate output... " >&6; } if ${ac_cv_path_SED+:} false; then : $as_echo_n "(cached) " >&6 else ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ for ac_i in 1 2 3 4 5 6 7; do ac_script="$ac_script$as_nl$ac_script" done echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed { ac_script=; unset ac_script;} if test -z "$SED"; then ac_path_SED_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_SED" || continue # Check for GNU ac_path_SED and select it if it is found. # Check for GNU $ac_path_SED case `"$ac_path_SED" --version 2>&1` in *GNU*) ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo '' >> "conftest.nl" "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_SED_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_SED="$ac_path_SED" ac_path_SED_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_SED_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_SED"; then as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 fi else ac_cv_path_SED=$SED fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 $as_echo "$ac_cv_path_SED" >&6; } SED="$ac_cv_path_SED" rm -f conftest.sed { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether NLS is requested" >&5 $as_echo_n "checking whether NLS is requested... " >&6; } # Check whether --enable-nls was given. if test "${enable_nls+set}" = set; then : enableval=$enable_nls; USE_NLS=$enableval else USE_NLS=yes fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_NLS" >&5 $as_echo "$USE_NLS" >&6; } GETTEXT_MACRO_VERSION=0.20 # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then # Determine PATH_SEPARATOR by trying to find /bin/sh in a PATH which # contains only /bin. Note that ksh looks also at the FPATH variable, # so we have to set that as well for the test. PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ || PATH_SEPARATOR=';' } fi # Find out how to test for executable files. Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. cat >conf$$.file <<_ASEOF #! /bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "msgfmt", so it can be a program name with args. 
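# The gettext tools are probed rather than simply looked up in PATH: each
# candidate msgfmt is run as "msgfmt --statistics /dev/null" and rejected if
# that merely prints a usage message, which filters out incompatible
# lookalikes.  MSGFMT falls back to ":" when nothing suitable is found.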
set dummy msgfmt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_MSGFMT+:} false; then : $as_echo_n "(cached) " >&6 else case "$MSGFMT" in [\\/]* | ?:[\\/]*) ac_cv_path_MSGFMT="$MSGFMT" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then echo "$as_me: trying $ac_dir/$ac_word..." >&5 if $ac_dir/$ac_word --statistics /dev/null >&5 2>&1 && (if $ac_dir/$ac_word --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then ac_cv_path_MSGFMT="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" test -z "$ac_cv_path_MSGFMT" && ac_cv_path_MSGFMT=":" ;; esac fi MSGFMT="$ac_cv_path_MSGFMT" if test "$MSGFMT" != ":"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MSGFMT" >&5 $as_echo "$MSGFMT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "gmsgfmt", so it can be a program name with args. set dummy gmsgfmt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_GMSGFMT+:} false; then : $as_echo_n "(cached) " >&6 else case $GMSGFMT in [\\/]* | ?:[\\/]*) ac_cv_path_GMSGFMT="$GMSGFMT" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_GMSGFMT="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_GMSGFMT" && ac_cv_path_GMSGFMT="$MSGFMT" ;; esac fi GMSGFMT=$ac_cv_path_GMSGFMT if test -n "$GMSGFMT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GMSGFMT" >&5 $as_echo "$GMSGFMT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi case `$GMSGFMT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) GMSGFMT_015=: ;; *) GMSGFMT_015=$GMSGFMT ;; esac # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then # Determine PATH_SEPARATOR by trying to find /bin/sh in a PATH which # contains only /bin. Note that ksh looks also at the FPATH variable, # so we have to set that as well for the test. PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ || PATH_SEPARATOR=';' } fi # Find out how to test for executable files. Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. cat >conf$$.file <<_ASEOF #! /bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "xgettext", so it can be a program name with args. set dummy xgettext; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_path_XGETTEXT+:} false; then : $as_echo_n "(cached) " >&6 else case "$XGETTEXT" in [\\/]* | ?:[\\/]*) ac_cv_path_XGETTEXT="$XGETTEXT" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then echo "$as_me: trying $ac_dir/$ac_word..." >&5 if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >&5 2>&1 && (if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then ac_cv_path_XGETTEXT="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" test -z "$ac_cv_path_XGETTEXT" && ac_cv_path_XGETTEXT=":" ;; esac fi XGETTEXT="$ac_cv_path_XGETTEXT" if test "$XGETTEXT" != ":"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XGETTEXT" >&5 $as_echo "$XGETTEXT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi rm -f messages.po case `$XGETTEXT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) XGETTEXT_015=: ;; *) XGETTEXT_015=$XGETTEXT ;; esac # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then # Determine PATH_SEPARATOR by trying to find /bin/sh in a PATH which # contains only /bin. Note that ksh looks also at the FPATH variable, # so we have to set that as well for the test. PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ || PATH_SEPARATOR=';' } fi # Find out how to test for executable files. Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. cat >conf$$.file <<_ASEOF #! /bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "msgmerge", so it can be a program name with args. set dummy msgmerge; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_MSGMERGE+:} false; then : $as_echo_n "(cached) " >&6 else case "$MSGMERGE" in [\\/]* | ?:[\\/]*) ac_cv_path_MSGMERGE="$MSGMERGE" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then echo "$as_me: trying $ac_dir/$ac_word..." 
>&5 if $ac_dir/$ac_word --update -q /dev/null /dev/null >&5 2>&1; then ac_cv_path_MSGMERGE="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" test -z "$ac_cv_path_MSGMERGE" && ac_cv_path_MSGMERGE=":" ;; esac fi MSGMERGE="$ac_cv_path_MSGMERGE" if test "$MSGMERGE" != ":"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MSGMERGE" >&5 $as_echo "$MSGMERGE" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if LC_ALL=C $MSGMERGE --help | grep ' --for-msgfmt ' >/dev/null; then MSGMERGE_FOR_MSGFMT_OPTION='--for-msgfmt' else if LC_ALL=C $MSGMERGE --help | grep ' --no-fuzzy-matching ' >/dev/null; then MSGMERGE_FOR_MSGFMT_OPTION='--no-fuzzy-matching --no-location --quiet' else MSGMERGE_FOR_MSGFMT_OPTION='--no-location --quiet' fi fi test -n "${XGETTEXT_EXTRA_OPTIONS+set}" || XGETTEXT_EXTRA_OPTIONS= ac_config_commands="$ac_config_commands po-directories" if test "X$prefix" = "XNONE"; then acl_final_prefix="$ac_default_prefix" else acl_final_prefix="$prefix" fi if test "X$exec_prefix" = "XNONE"; then acl_final_exec_prefix='${prefix}' else acl_final_exec_prefix="$exec_prefix" fi acl_save_prefix="$prefix" prefix="$acl_final_prefix" eval acl_final_exec_prefix=\"$acl_final_exec_prefix\" prefix="$acl_save_prefix" # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes else with_gnu_ld=no fi # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then # Determine PATH_SEPARATOR by trying to find /bin/sh in a PATH which # contains only /bin. Note that ksh looks also at the FPATH variable, # so we have to set that as well for the test. PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ || PATH_SEPARATOR=';' } fi if test -n "$LD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld" >&5 $as_echo_n "checking for ld... " >&6; } elif test "$GCC" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } elif test "$with_gnu_ld" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if test -n "$LD"; then # Let the user override the test with a path. : else if ${acl_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else acl_cv_path_LD= # Final result of this test ac_prog=ld # Program to search in $PATH if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw acl_output=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) acl_output=`($CC -print-prog-name=ld) 2>&5` ;; esac case $acl_output in # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld acl_output=`echo "$acl_output" | sed 's%\\\\%/%g'` while echo "$acl_output" | grep "$re_direlt" > /dev/null 2>&1; do acl_output=`echo $acl_output | sed "s%$re_direlt%/%"` done # Got the pathname. No search in PATH is needed. acl_cv_path_LD="$acl_output" ac_prog= ;; "") # If it fails, then pretend we aren't using GCC. ;; *) # If it is relative, then search for the first ld in PATH. 
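        # (gcc reported only a bare program name, so whether the ld found on
        # PATH will be GNU ld is not yet known at this point.)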
with_gnu_ld=unknown ;; esac fi if test -n "$ac_prog"; then # Search for $ac_prog in $PATH. acl_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$acl_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then acl_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$acl_cv_path_LD" -v 2>&1 conftest.$ac_ext /* end confdefs.h. */ #if defined __powerpc64__ || defined _ARCH_PPC64 int ok; #else error fail #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # The compiler produces 64-bit code. Add option '-b64' so that the # linker groks 64-bit object files. case "$acl_cv_path_LD " in *" -b64 "*) ;; *) acl_cv_path_LD="$acl_cv_path_LD -b64" ;; esac fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ;; sparc64-*-netbsd*) cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined __sparcv9 || defined __arch64__ int ok; #else error fail #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else # The compiler produces 32-bit code. Add option '-m elf32_sparc' # so that the linker groks 32-bit object files. case "$acl_cv_path_LD " in *" -m elf32_sparc "*) ;; *) acl_cv_path_LD="$acl_cv_path_LD -m elf32_sparc" ;; esac fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ;; esac fi LD="$acl_cv_path_LD" fi if test -n "$LD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if ${acl_cv_prog_gnu_ld+:} false; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 &5 $as_echo "$acl_cv_prog_gnu_ld" >&6; } with_gnu_ld=$acl_cv_prog_gnu_ld { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shared library run path origin" >&5 $as_echo_n "checking for shared library run path origin... " >&6; } if ${acl_cv_rpath+:} false; then : $as_echo_n "(cached) " >&6 else CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \ ${CONFIG_SHELL-/bin/sh} "$ac_aux_dir/config.rpath" "$host" > conftest.sh . ./conftest.sh rm -f ./conftest.sh acl_cv_rpath=done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acl_cv_rpath" >&5 $as_echo "$acl_cv_rpath" >&6; } wl="$acl_cv_wl" acl_libext="$acl_cv_libext" acl_shlibext="$acl_cv_shlibext" acl_libname_spec="$acl_cv_libname_spec" acl_library_names_spec="$acl_cv_library_names_spec" acl_hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec" acl_hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator" acl_hardcode_direct="$acl_cv_hardcode_direct" acl_hardcode_minus_L="$acl_cv_hardcode_minus_L" # Check whether --enable-rpath was given. if test "${enable_rpath+set}" = set; then : enableval=$enable_rpath; : else enable_rpath=yes fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking 32-bit host C ABI" >&5 $as_echo_n "checking 32-bit host C ABI... 
" >&6; } if ${gl_cv_host_cpu_c_abi_32bit+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$gl_cv_host_cpu_c_abi"; then case "$gl_cv_host_cpu_c_abi" in i386 | x86_64-x32 | arm | armhf | arm64-ilp32 | hppa | ia64-ilp32 | mips | mipsn32 | powerpc | riscv*-ilp32* | s390 | sparc) gl_cv_host_cpu_c_abi_32bit=yes ;; x86_64 | alpha | arm64 | hppa64 | ia64 | mips64 | powerpc64 | powerpc64-elfv2 | riscv*-lp64* | s390x | sparc64 ) gl_cv_host_cpu_c_abi_32bit=no ;; *) gl_cv_host_cpu_c_abi_32bit=unknown ;; esac else case "$host_cpu" in # CPUs that only support a 32-bit ABI. arc \ | bfin \ | cris* \ | csky \ | epiphany \ | ft32 \ | h8300 \ | m68k \ | microblaze | microblazeel \ | nds32 | nds32le | nds32be \ | nios2 | nios2eb | nios2el \ | or1k* \ | or32 \ | sh | sh1234 | sh1234elb \ | tic6x \ | xtensa* ) gl_cv_host_cpu_c_abi_32bit=yes ;; # CPUs that only support a 64-bit ABI. alpha | alphaev[4-8] | alphaev56 | alphapca5[67] | alphaev6[78] \ | mmix ) gl_cv_host_cpu_c_abi_32bit=no ;; i[34567]86 ) gl_cv_host_cpu_c_abi_32bit=yes ;; x86_64 ) # On x86_64 systems, the C compiler may be generating code in one of # these ABIs: # - 64-bit instruction set, 64-bit pointers, 64-bit 'long': x86_64. # - 64-bit instruction set, 64-bit pointers, 32-bit 'long': x86_64 # with native Windows (mingw, MSVC). # - 64-bit instruction set, 32-bit pointers, 32-bit 'long': x86_64-x32. # - 32-bit instruction set, 32-bit pointers, 32-bit 'long': i386. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if (defined __x86_64__ || defined __amd64__ \ || defined _M_X64 || defined _M_AMD64) \ && !(defined __ILP32__ || defined _ILP32) int ok; #else error fail #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gl_cv_host_cpu_c_abi_32bit=no else gl_cv_host_cpu_c_abi_32bit=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ;; arm* | aarch64 ) # Assume arm with EABI. # On arm64 systems, the C compiler may be generating code in one of # these ABIs: # - aarch64 instruction set, 64-bit pointers, 64-bit 'long': arm64. # - aarch64 instruction set, 32-bit pointers, 32-bit 'long': arm64-ilp32. # - 32-bit instruction set, 32-bit pointers, 32-bit 'long': arm or armhf. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined __aarch64__ && !(defined __ILP32__ || defined _ILP32) int ok; #else error fail #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gl_cv_host_cpu_c_abi_32bit=no else gl_cv_host_cpu_c_abi_32bit=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ;; hppa1.0 | hppa1.1 | hppa2.0* | hppa64 ) # On hppa, the C compiler may be generating 32-bit code or 64-bit # code. In the latter case, it defines _LP64 and __LP64__. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __LP64__ int ok; #else error fail #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gl_cv_host_cpu_c_abi_32bit=no else gl_cv_host_cpu_c_abi_32bit=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ;; ia64* ) # On ia64 on HP-UX, the C compiler may be generating 64-bit code or # 32-bit code. In the latter case, it defines _ILP32. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef _ILP32 int ok; #else error fail #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gl_cv_host_cpu_c_abi_32bit=yes else gl_cv_host_cpu_c_abi_32bit=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ;; mips* ) # We should also check for (_MIPS_SZPTR == 64), but gcc keeps this # at 32. 
cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined _MIPS_SZLONG && (_MIPS_SZLONG == 64) int ok; #else error fail #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gl_cv_host_cpu_c_abi_32bit=no else gl_cv_host_cpu_c_abi_32bit=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ;; powerpc* ) # Different ABIs are in use on AIX vs. Mac OS X vs. Linux,*BSD. # No need to distinguish them here; the caller may distinguish # them based on the OS. # On powerpc64 systems, the C compiler may still be generating # 32-bit code. And on powerpc-ibm-aix systems, the C compiler may # be generating 64-bit code. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined __powerpc64__ || defined _ARCH_PPC64 int ok; #else error fail #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gl_cv_host_cpu_c_abi_32bit=no else gl_cv_host_cpu_c_abi_32bit=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ;; rs6000 ) gl_cv_host_cpu_c_abi_32bit=yes ;; riscv32 | riscv64 ) # There are 6 ABIs: ilp32, ilp32f, ilp32d, lp64, lp64f, lp64d. # Size of 'long' and 'void *': cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined __LP64__ int ok; #else error fail #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gl_cv_host_cpu_c_abi_32bit=no else gl_cv_host_cpu_c_abi_32bit=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ;; s390* ) # On s390x, the C compiler may be generating 64-bit (= s390x) code # or 31-bit (= s390) code. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined __LP64__ || defined __s390x__ int ok; #else error fail #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gl_cv_host_cpu_c_abi_32bit=no else gl_cv_host_cpu_c_abi_32bit=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ;; sparc | sparc64 ) # UltraSPARCs running Linux have `uname -m` = "sparc64", but the # C compiler still generates 32-bit code. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined __sparcv9 || defined __arch64__ int ok; #else error fail #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : gl_cv_host_cpu_c_abi_32bit=no else gl_cv_host_cpu_c_abi_32bit=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ;; *) gl_cv_host_cpu_c_abi_32bit=unknown ;; esac fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gl_cv_host_cpu_c_abi_32bit" >&5 $as_echo "$gl_cv_host_cpu_c_abi_32bit" >&6; } HOST_CPU_C_ABI_32BIT="$gl_cv_host_cpu_c_abi_32bit" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ELF binary format" >&5 $as_echo_n "checking for ELF binary format... " >&6; } if ${gl_cv_elf+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __ELF__ Extensible Linking Format #endif _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "Extensible Linking Format" >/dev/null 2>&1; then : gl_cv_elf=yes else gl_cv_elf=no fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gl_cv_elf" >&5 $as_echo "$gl_cv_elf" >&6; } if test $gl_cv_elf; then # Extract the ELF class of a file (5th byte) in decimal. # Cf. https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header if od -A x < /dev/null >/dev/null 2>/dev/null; then # Use POSIX od. func_elfclass () { od -A n -t d1 -j 4 -N 1 } else # Use BSD hexdump. func_elfclass () { dd bs=1 count=1 skip=4 2>/dev/null | hexdump -e '1/1 "%3d "' echo } fi case $HOST_CPU_C_ABI_32BIT in yes) # 32-bit ABI. 
acl_is_expected_elfclass () { test "`func_elfclass | sed -e 's/[ ]//g'`" = 1 } ;; no) # 64-bit ABI. acl_is_expected_elfclass () { test "`func_elfclass | sed -e 's/[ ]//g'`" = 2 } ;; *) # Unknown. acl_is_expected_elfclass () { : } ;; esac else acl_is_expected_elfclass () { : } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for the common suffixes of directories in the library search path" >&5 $as_echo_n "checking for the common suffixes of directories in the library search path... " >&6; } if ${acl_cv_libdirstems+:} false; then : $as_echo_n "(cached) " >&6 else acl_libdirstem=lib acl_libdirstem2= acl_libdirstem3= case "$host_os" in solaris*) if test $HOST_CPU_C_ABI_32BIT = no; then acl_libdirstem2=lib/64 case "$host_cpu" in sparc*) acl_libdirstem3=lib/sparcv9 ;; i*86 | x86_64) acl_libdirstem3=lib/amd64 ;; esac fi ;; *) searchpath=`(LC_ALL=C $CC $CPPFLAGS $CFLAGS -print-search-dirs) 2>/dev/null \ | sed -n -e 's,^libraries: ,,p' | sed -e 's,^=,,'` if test $HOST_CPU_C_ABI_32BIT != no; then # 32-bit or unknown ABI. if test -d /usr/lib32; then acl_libdirstem2=lib32 fi fi if test $HOST_CPU_C_ABI_32BIT != yes; then # 64-bit or unknown ABI. if test -d /usr/lib64; then acl_libdirstem3=lib64 fi fi if test -n "$searchpath"; then acl_save_IFS="${IFS= }"; IFS=":" for searchdir in $searchpath; do if test -d "$searchdir"; then case "$searchdir" in */lib32/ | */lib32 ) acl_libdirstem2=lib32 ;; */lib64/ | */lib64 ) acl_libdirstem3=lib64 ;; */../ | */.. ) # Better ignore directories of this form. They are misleading. ;; *) searchdir=`cd "$searchdir" && pwd` case "$searchdir" in */lib32 ) acl_libdirstem2=lib32 ;; */lib64 ) acl_libdirstem3=lib64 ;; esac ;; esac fi done IFS="$acl_save_IFS" if test $HOST_CPU_C_ABI_32BIT = yes; then # 32-bit ABI. acl_libdirstem3= fi if test $HOST_CPU_C_ABI_32BIT = no; then # 64-bit ABI. acl_libdirstem2= fi fi ;; esac test -n "$acl_libdirstem2" || acl_libdirstem2="$acl_libdirstem" test -n "$acl_libdirstem3" || acl_libdirstem3="$acl_libdirstem" acl_cv_libdirstems="$acl_libdirstem,$acl_libdirstem2,$acl_libdirstem3" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acl_cv_libdirstems" >&5 $as_echo "$acl_cv_libdirstems" >&6; } acl_libdirstem=`echo "$acl_cv_libdirstems" | sed -e 's/,.*//'` acl_libdirstem2=`echo "$acl_cv_libdirstems" | sed -e 's/^[^,]*,//' -e 's/,.*//'` acl_libdirstem3=`echo "$acl_cv_libdirstems" | sed -e 's/^[^,]*,[^,]*,//' -e 's/,.*//'` use_additional=yes acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" eval additional_libdir2=\"$exec_prefix/$acl_libdirstem2\" eval additional_libdir3=\"$exec_prefix/$acl_libdirstem3\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" # Check whether --with-libiconv-prefix was given. 
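# --- Editorial sketch (illustrative only, not part of the generated script).
# The cached acl_cv_libdirstems value above is derived from two hints: the
# presence of /usr/lib32 or /usr/lib64, and any lib32/lib64 components in
# the compiler's own library search path (plus the Solaris-specific lib/64,
# lib/sparcv9 and lib/amd64 stems).  The --with-libiconv-prefix handling
# that follows only reuses those stems to turn one prefix into the matching
# include and library directories.  A condensed, hypothetical version of the
# search-path probe (assumes a GCC-compatible driver; other compilers may
# not support -print-search-dirs and simply yield no output):
mu_sketch_multilib_stems () {
  mu_sketch_path=`LC_ALL=C ${CC-cc} -print-search-dirs 2>/dev/null \
                    | sed -n 's,^libraries: ,,p' | sed 's,^=,,'`
  mu_sketch_save_IFS=$IFS; IFS=:
  for mu_sketch_dir in $mu_sketch_path; do
    case $mu_sketch_dir in
      */lib32 | */lib32/ ) echo lib32 ;;
      */lib64 | */lib64/ ) echo lib64 ;;
    esac
  done | sort -u
  IFS=$mu_sketch_save_IFS
}
# End of editorial sketch. --------------------------------------------------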
if test "${with_libiconv_prefix+set}" = set; then : withval=$with_libiconv_prefix; if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" eval additional_libdir2=\"$exec_prefix/$acl_libdirstem2\" eval additional_libdir3=\"$exec_prefix/$acl_libdirstem3\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" else additional_includedir="$withval/include" additional_libdir="$withval/$acl_libdirstem" additional_libdir2="$withval/$acl_libdirstem2" additional_libdir3="$withval/$acl_libdirstem3" fi fi fi if test "X$additional_libdir2" = "X$additional_libdir"; then additional_libdir2= fi if test "X$additional_libdir3" = "X$additional_libdir"; then additional_libdir3= fi LIBICONV= LTLIBICONV= INCICONV= LIBICONV_PREFIX= HAVE_LIBICONV= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='iconv ' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./+-|ABCDEFGHIJKLMNOPQRSTUVWXYZ____|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIBICONV="${LIBICONV}${LIBICONV:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }$value" else : fi else found_dir= found_la= found_so= found_a= eval libname=\"$acl_libname_spec\" # typically: libname=lib$name if test -n "$acl_shlibext"; then shrext=".$acl_shlibext" # typically: shrext=.so else shrext= fi if test $use_additional = yes; then for additional_libdir_variable in additional_libdir additional_libdir2 additional_libdir3; do if test "X$found_dir" = "X"; then eval dir=\$$additional_libdir_variable if test -n "$dir"; then if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext" && acl_is_expected_elfclass < "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' 
-n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver" && acl_is_expected_elfclass < "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f" && acl_is_expected_elfclass < "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext" && ${AR-ar} -p "$dir/$libname.$acl_libext" | acl_is_expected_elfclass; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi fi fi done fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext" && acl_is_expected_elfclass < "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver" && acl_is_expected_elfclass < "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f" && acl_is_expected_elfclass < "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext" && ${AR-ar} -p "$dir/$libname.$acl_libext" | acl_is_expected_elfclass; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no \ || test "X$found_dir" = "X/usr/$acl_libdirstem" \ || test "X$found_dir" = "X/usr/$acl_libdirstem2" \ || test "X$found_dir" = "X/usr/$acl_libdirstem3"; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$acl_hardcode_direct" = yes; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else haveit= for x in $LDFLAGS $LIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then 
LIBICONV="${LIBICONV}${LIBICONV:+ }-L$found_dir" fi if test "$acl_hardcode_minus_L" != no; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else LIBICONV="${LIBICONV}${LIBICONV:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_a" else LIBICONV="${LIBICONV}${LIBICONV:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */$acl_libdirstem | */$acl_libdirstem/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` if test "$name" = 'iconv'; then LIBICONV_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; */$acl_libdirstem2 | */$acl_libdirstem2/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem2/"'*$,,'` if test "$name" = 'iconv'; then LIBICONV_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; */$acl_libdirstem3 | */$acl_libdirstem3/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem3/"'*$,,'` if test "$name" = 'iconv'; then LIBICONV_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INCICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then INCICONV="${INCICONV}${INCICONV:+ }-I$additional_includedir" fi fi fi fi fi if test -n "$found_la"; then save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . 
"./$found_la" ;; esac libdir="$save_libdir" for dep in $dependency_libs; do case "$dep" in -L*) dependency_libdir=`echo "X$dep" | sed -e 's/^X-L//'` if test "X$dependency_libdir" != "X/usr/$acl_libdirstem" \ && test "X$dependency_libdir" != "X/usr/$acl_libdirstem2" \ && test "X$dependency_libdir" != "X/usr/$acl_libdirstem3"; then haveit= if test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem" \ || test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem2" \ || test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem3"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$dependency_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$dependency_libdir"; then LIBICONV="${LIBICONV}${LIBICONV:+ }-L$dependency_libdir" fi fi haveit= for x in $LDFLAGS $LTLIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$dependency_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$dependency_libdir"; then LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-L$dependency_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) LIBICONV="${LIBICONV}${LIBICONV:+ }$dep" LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }$dep" ;; esac done fi else LIBICONV="${LIBICONV}${LIBICONV:+ }-l$name" LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n "$acl_hardcode_libdir_separator"; then alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$found_dir" done acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBICONV="${LIBICONV}${LIBICONV:+ }$flag" else for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBICONV="${LIBICONV}${LIBICONV:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-R$found_dir" done fi if test "$mu_cv_with_gettext" != no; then : acm_save__0_CPPFLAGS=$CPPFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFPreferencesCopyAppValue" >&5 $as_echo_n "checking for CFPreferencesCopyAppValue... " >&6; } if ${gt_cv_func_CFPreferencesCopyAppValue+:} false; then : $as_echo_n "(cached) " >&6 else gt_save_LIBS="$LIBS" LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { CFPreferencesCopyAppValue(NULL, NULL) ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : gt_cv_func_CFPreferencesCopyAppValue=yes else gt_cv_func_CFPreferencesCopyAppValue=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS="$gt_save_LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_CFPreferencesCopyAppValue" >&5 $as_echo "$gt_cv_func_CFPreferencesCopyAppValue" >&6; } if test $gt_cv_func_CFPreferencesCopyAppValue = yes; then $as_echo "#define HAVE_CFPREFERENCESCOPYAPPVALUE 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFLocaleCopyPreferredLanguages" >&5 $as_echo_n "checking for CFLocaleCopyPreferredLanguages... " >&6; } if ${gt_cv_func_CFLocaleCopyPreferredLanguages+:} false; then : $as_echo_n "(cached) " >&6 else gt_save_LIBS="$LIBS" LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { CFLocaleCopyPreferredLanguages(); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : gt_cv_func_CFLocaleCopyPreferredLanguages=yes else gt_cv_func_CFLocaleCopyPreferredLanguages=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS="$gt_save_LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_CFLocaleCopyPreferredLanguages" >&5 $as_echo "$gt_cv_func_CFLocaleCopyPreferredLanguages" >&6; } if test $gt_cv_func_CFLocaleCopyPreferredLanguages = yes; then $as_echo "#define HAVE_CFLOCALECOPYPREFERREDLANGUAGES 1" >>confdefs.h fi INTL_MACOSX_LIBS= if test $gt_cv_func_CFPreferencesCopyAppValue = yes \ || test $gt_cv_func_CFLocaleCopyPreferredLanguages = yes; then INTL_MACOSX_LIBS="-Wl,-framework -Wl,CoreFoundation" fi LIBINTL= LTLIBINTL= POSUB= case " $gt_needs " in *" need-formatstring-macros "*) gt_api_version=3 ;; *" need-ngettext "*) gt_api_version=2 ;; *) gt_api_version=1 ;; esac gt_func_gnugettext_libc="gt_cv_func_gnugettext${gt_api_version}_libc" gt_func_gnugettext_libintl="gt_cv_func_gnugettext${gt_api_version}_libintl" if test "$USE_NLS" = "yes"; then gt_use_preinstalled_gnugettext=no if test $gt_api_version -ge 3; then gt_revision_test_code=' #ifndef __GNU_GETTEXT_SUPPORTED_REVISION #define __GNU_GETTEXT_SUPPORTED_REVISION(major) ((major) == 0 ? 0 : -1) #endif typedef int array [2 * (__GNU_GETTEXT_SUPPORTED_REVISION(0) >= 1) - 1]; ' else gt_revision_test_code= fi if test $gt_api_version -ge 2; then gt_expression_test_code=' + * ngettext ("", "", 0)' else gt_expression_test_code= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU gettext in libc" >&5 $as_echo_n "checking for GNU gettext in libc... " >&6; } if eval \${$gt_func_gnugettext_libc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #ifndef __GNU_GETTEXT_SUPPORTED_REVISION extern int _nl_msg_cat_cntr; extern int *_nl_domain_bindings; #define __GNU_GETTEXT_SYMBOL_EXPRESSION (_nl_msg_cat_cntr + *_nl_domain_bindings) #else #define __GNU_GETTEXT_SYMBOL_EXPRESSION 0 #endif $gt_revision_test_code int main () { bindtextdomain ("", ""); return * gettext ("")$gt_expression_test_code + __GNU_GETTEXT_SYMBOL_EXPRESSION ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$gt_func_gnugettext_libc=yes" else eval "$gt_func_gnugettext_libc=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$gt_func_gnugettext_libc { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" != "yes"; }; then am_save_CPPFLAGS="$CPPFLAGS" for element in $INCICONV; do haveit= for x in $CPPFLAGS; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for iconv" >&5 $as_echo_n "checking for iconv... " >&6; } if ${am_cv_func_iconv+:} false; then : $as_echo_n "(cached) " >&6 else am_cv_func_iconv="no, consider installing GNU libiconv" am_cv_lib_iconv=no cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : am_cv_func_iconv=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test "$am_cv_func_iconv" != yes; then am_save_LIBS="$LIBS" LIBS="$LIBS $LIBICONV" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : am_cv_lib_iconv=yes am_cv_func_iconv=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS="$am_save_LIBS" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_func_iconv" >&5 $as_echo "$am_cv_func_iconv" >&6; } if test "$am_cv_func_iconv" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working iconv" >&5 $as_echo_n "checking for working iconv... " >&6; } if ${am_cv_func_iconv_works+:} false; then : $as_echo_n "(cached) " >&6 else am_save_LIBS="$LIBS" if test $am_cv_lib_iconv = yes; then LIBS="$LIBS $LIBICONV" fi am_cv_func_iconv_works=no for ac_iconv_const in '' 'const'; do if test "$cross_compiling" = yes; then : case "$host_os" in aix* | hpux*) am_cv_func_iconv_works="guessing no" ;; *) am_cv_func_iconv_works="guessing yes" ;; esac else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #ifndef ICONV_CONST # define ICONV_CONST $ac_iconv_const #endif int main () { int result = 0; /* Test against AIX 5.1 bug: Failures are not distinguishable from successful returns. 
*/ { iconv_t cd_utf8_to_88591 = iconv_open ("ISO8859-1", "UTF-8"); if (cd_utf8_to_88591 != (iconv_t)(-1)) { static ICONV_CONST char input[] = "\342\202\254"; /* EURO SIGN */ char buf[10]; ICONV_CONST char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_utf8_to_88591, &inptr, &inbytesleft, &outptr, &outbytesleft); if (res == 0) result |= 1; iconv_close (cd_utf8_to_88591); } } /* Test against Solaris 10 bug: Failures are not distinguishable from successful returns. */ { iconv_t cd_ascii_to_88591 = iconv_open ("ISO8859-1", "646"); if (cd_ascii_to_88591 != (iconv_t)(-1)) { static ICONV_CONST char input[] = "\263"; char buf[10]; ICONV_CONST char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_ascii_to_88591, &inptr, &inbytesleft, &outptr, &outbytesleft); if (res == 0) result |= 2; iconv_close (cd_ascii_to_88591); } } /* Test against AIX 6.1..7.1 bug: Buffer overrun. */ { iconv_t cd_88591_to_utf8 = iconv_open ("UTF-8", "ISO-8859-1"); if (cd_88591_to_utf8 != (iconv_t)(-1)) { static ICONV_CONST char input[] = "\304"; static char buf[2] = { (char)0xDE, (char)0xAD }; ICONV_CONST char *inptr = input; size_t inbytesleft = 1; char *outptr = buf; size_t outbytesleft = 1; size_t res = iconv (cd_88591_to_utf8, &inptr, &inbytesleft, &outptr, &outbytesleft); if (res != (size_t)(-1) || outptr - buf > 1 || buf[1] != (char)0xAD) result |= 4; iconv_close (cd_88591_to_utf8); } } #if 0 /* This bug could be worked around by the caller. */ /* Test against HP-UX 11.11 bug: Positive return value instead of 0. */ { iconv_t cd_88591_to_utf8 = iconv_open ("utf8", "iso88591"); if (cd_88591_to_utf8 != (iconv_t)(-1)) { static ICONV_CONST char input[] = "\304rger mit b\366sen B\374bchen ohne Augenma\337"; char buf[50]; ICONV_CONST char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_88591_to_utf8, &inptr, &inbytesleft, &outptr, &outbytesleft); if ((int)res > 0) result |= 8; iconv_close (cd_88591_to_utf8); } } #endif /* Test against HP-UX 11.11 bug: No converter from EUC-JP to UTF-8 is provided. */ { /* Try standardized names. */ iconv_t cd1 = iconv_open ("UTF-8", "EUC-JP"); /* Try IRIX, OSF/1 names. */ iconv_t cd2 = iconv_open ("UTF-8", "eucJP"); /* Try AIX names. */ iconv_t cd3 = iconv_open ("UTF-8", "IBM-eucJP"); /* Try HP-UX names. 
*/ iconv_t cd4 = iconv_open ("utf8", "eucJP"); if (cd1 == (iconv_t)(-1) && cd2 == (iconv_t)(-1) && cd3 == (iconv_t)(-1) && cd4 == (iconv_t)(-1)) result |= 16; if (cd1 != (iconv_t)(-1)) iconv_close (cd1); if (cd2 != (iconv_t)(-1)) iconv_close (cd2); if (cd3 != (iconv_t)(-1)) iconv_close (cd3); if (cd4 != (iconv_t)(-1)) iconv_close (cd4); } return result; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : am_cv_func_iconv_works=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi test "$am_cv_func_iconv_works" = no || break done LIBS="$am_save_LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_func_iconv_works" >&5 $as_echo "$am_cv_func_iconv_works" >&6; } case "$am_cv_func_iconv_works" in *no) am_func_iconv=no am_cv_lib_iconv=no ;; *) am_func_iconv=yes ;; esac else am_func_iconv=no am_cv_lib_iconv=no fi if test "$am_func_iconv" = yes; then $as_echo "#define HAVE_ICONV 1" >>confdefs.h fi if test "$am_cv_lib_iconv" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libiconv" >&5 $as_echo_n "checking how to link with libiconv... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBICONV" >&5 $as_echo "$LIBICONV" >&6; } else CPPFLAGS="$am_save_CPPFLAGS" LIBICONV= LTLIBICONV= fi use_additional=yes acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" eval additional_libdir2=\"$exec_prefix/$acl_libdirstem2\" eval additional_libdir3=\"$exec_prefix/$acl_libdirstem3\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" # Check whether --with-libintl-prefix was given. 
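# --- Editorial sketch (illustrative only, not part of the generated script).
# The iconv detection above links a trivial iconv_open/iconv/iconv_close
# program twice: first against libc alone, then with $LIBICONV added, and it
# only records am_cv_lib_iconv=yes when the second attempt is what made the
# link succeed.  The --with-libintl-prefix handling below then repeats the
# same directory bookkeeping for libintl.  Stripped of caching, the probe is
# essentially this hypothetical helper (assuming a working $CC):
mu_sketch_have_iconv () {
  cat > conftest-iconv.c <<'EOF'
#include <stdlib.h>
#include <iconv.h>
int main (void)
{
  iconv_t cd = iconv_open ("", "");
  iconv (cd, NULL, NULL, NULL, NULL);
  iconv_close (cd);
  return 0;
}
EOF
  if ${CC-cc} conftest-iconv.c -o conftest-iconv >/dev/null 2>&1; then
    echo "libc"
  elif ${CC-cc} conftest-iconv.c -o conftest-iconv ${LIBICONV:--liconv} >/dev/null 2>&1; then
    echo "libiconv"            # needed the extra library to link
  else
    echo "none"
  fi
  rm -f conftest-iconv.c conftest-iconv
}
# End of editorial sketch. --------------------------------------------------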
if test "${with_libintl_prefix+set}" = set; then : withval=$with_libintl_prefix; if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" eval additional_libdir2=\"$exec_prefix/$acl_libdirstem2\" eval additional_libdir3=\"$exec_prefix/$acl_libdirstem3\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" else additional_includedir="$withval/include" additional_libdir="$withval/$acl_libdirstem" additional_libdir2="$withval/$acl_libdirstem2" additional_libdir3="$withval/$acl_libdirstem3" fi fi fi if test "X$additional_libdir2" = "X$additional_libdir"; then additional_libdir2= fi if test "X$additional_libdir3" = "X$additional_libdir"; then additional_libdir3= fi LIBINTL= LTLIBINTL= INCINTL= LIBINTL_PREFIX= HAVE_LIBINTL= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='intl ' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./+-|ABCDEFGHIJKLMNOPQRSTUVWXYZ____|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIBINTL="${LIBINTL}${LIBINTL:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$value" else : fi else found_dir= found_la= found_so= found_a= eval libname=\"$acl_libname_spec\" # typically: libname=lib$name if test -n "$acl_shlibext"; then shrext=".$acl_shlibext" # typically: shrext=.so else shrext= fi if test $use_additional = yes; then for additional_libdir_variable in additional_libdir additional_libdir2 additional_libdir3; do if test "X$found_dir" = "X"; then eval dir=\$$additional_libdir_variable if test -n "$dir"; then if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext" && acl_is_expected_elfclass < "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' 
-n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver" && acl_is_expected_elfclass < "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f" && acl_is_expected_elfclass < "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext" && ${AR-ar} -p "$dir/$libname.$acl_libext" | acl_is_expected_elfclass; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi fi fi done fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext" && acl_is_expected_elfclass < "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver" && acl_is_expected_elfclass < "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f" && acl_is_expected_elfclass < "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext" && ${AR-ar} -p "$dir/$libname.$acl_libext" | acl_is_expected_elfclass; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no \ || test "X$found_dir" = "X/usr/$acl_libdirstem" \ || test "X$found_dir" = "X/usr/$acl_libdirstem2" \ || test "X$found_dir" = "X/usr/$acl_libdirstem3"; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$acl_hardcode_direct" = yes; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else haveit= for x in $LDFLAGS $LIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then 
LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir" fi if test "$acl_hardcode_minus_L" != no; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_a" else LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */$acl_libdirstem | */$acl_libdirstem/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` if test "$name" = 'intl'; then LIBINTL_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; */$acl_libdirstem2 | */$acl_libdirstem2/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem2/"'*$,,'` if test "$name" = 'intl'; then LIBINTL_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; */$acl_libdirstem3 | */$acl_libdirstem3/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem3/"'*$,,'` if test "$name" = 'intl'; then LIBINTL_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INCINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then INCINTL="${INCINTL}${INCINTL:+ }-I$additional_includedir" fi fi fi fi fi if test -n "$found_la"; then save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . 
"./$found_la" ;; esac libdir="$save_libdir" for dep in $dependency_libs; do case "$dep" in -L*) dependency_libdir=`echo "X$dep" | sed -e 's/^X-L//'` if test "X$dependency_libdir" != "X/usr/$acl_libdirstem" \ && test "X$dependency_libdir" != "X/usr/$acl_libdirstem2" \ && test "X$dependency_libdir" != "X/usr/$acl_libdirstem3"; then haveit= if test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem" \ || test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem2" \ || test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem3"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$dependency_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$dependency_libdir"; then LIBINTL="${LIBINTL}${LIBINTL:+ }-L$dependency_libdir" fi fi haveit= for x in $LDFLAGS $LTLIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$dependency_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$dependency_libdir"; then LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$dependency_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) LIBINTL="${LIBINTL}${LIBINTL:+ }$dep" LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$dep" ;; esac done fi else LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name" LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n "$acl_hardcode_libdir_separator"; then alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$found_dir" done acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBINTL="${LIBINTL}${LIBINTL:+ }$flag" else for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBINTL="${LIBINTL}${LIBINTL:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-R$found_dir" done fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU gettext in libintl" >&5 $as_echo_n "checking for GNU gettext in libintl... " >&6; } if eval \${$gt_func_gnugettext_libintl+:} false; then : $as_echo_n "(cached) " >&6 else gt_save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS $INCINTL" gt_save_LIBS="$LIBS" LIBS="$LIBS $LIBINTL" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #ifndef __GNU_GETTEXT_SUPPORTED_REVISION extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias (const char *); #define __GNU_GETTEXT_SYMBOL_EXPRESSION (_nl_msg_cat_cntr + *_nl_expand_alias ("")) #else #define __GNU_GETTEXT_SYMBOL_EXPRESSION 0 #endif $gt_revision_test_code int main () { bindtextdomain ("", ""); return * gettext ("")$gt_expression_test_code + __GNU_GETTEXT_SYMBOL_EXPRESSION ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$gt_func_gnugettext_libintl=yes" else eval "$gt_func_gnugettext_libintl=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" != yes; } && test -n "$LIBICONV"; then LIBS="$LIBS $LIBICONV" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #ifndef __GNU_GETTEXT_SUPPORTED_REVISION extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias (const char *); #define __GNU_GETTEXT_SYMBOL_EXPRESSION (_nl_msg_cat_cntr + *_nl_expand_alias ("")) #else #define __GNU_GETTEXT_SYMBOL_EXPRESSION 0 #endif $gt_revision_test_code int main () { bindtextdomain ("", ""); return * gettext ("")$gt_expression_test_code + __GNU_GETTEXT_SYMBOL_EXPRESSION ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : LIBINTL="$LIBINTL $LIBICONV" LTLIBINTL="$LTLIBINTL $LTLIBICONV" eval "$gt_func_gnugettext_libintl=yes" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi CPPFLAGS="$gt_save_CPPFLAGS" LIBS="$gt_save_LIBS" fi eval ac_res=\$$gt_func_gnugettext_libintl { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" = "yes"; } \ || { { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; } \ && test "$PACKAGE" != gettext-runtime \ && test "$PACKAGE" != gettext-tools; }; then gt_use_preinstalled_gnugettext=yes else LIBINTL= LTLIBINTL= INCINTL= fi if test -n "$INTL_MACOSX_LIBS"; then if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then LIBINTL="$LIBINTL $INTL_MACOSX_LIBS" LTLIBINTL="$LTLIBINTL $INTL_MACOSX_LIBS" fi fi if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then $as_echo "#define ENABLE_NLS 1" >>confdefs.h else USE_NLS=no fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use NLS" >&5 $as_echo_n "checking whether to use NLS... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_NLS" >&5 $as_echo "$USE_NLS" >&6; } if test "$USE_NLS" = "yes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking where the gettext function comes from" >&5 $as_echo_n "checking where the gettext function comes from... " >&6; } if test "$gt_use_preinstalled_gnugettext" = "yes"; then if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then gt_source="external libintl" else gt_source="libc" fi else gt_source="included intl directory" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_source" >&5 $as_echo "$gt_source" >&6; } fi if test "$USE_NLS" = "yes"; then if test "$gt_use_preinstalled_gnugettext" = "yes"; then if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libintl" >&5 $as_echo_n "checking how to link with libintl... 
" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBINTL" >&5 $as_echo "$LIBINTL" >&6; } for element in $INCINTL; do haveit= for x in $CPPFLAGS; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" fi done fi $as_echo "#define HAVE_GETTEXT 1" >>confdefs.h $as_echo "#define HAVE_DCGETTEXT 1" >>confdefs.h fi POSUB=po fi INTLLIBS="$LIBINTL" CPPFLAGS=$acm_save__0_CPPFLAGS if test "$gt_cv_func_gnugettext2_libc" != yes && test "$gt_cv_func_gnugettext2_libintl" != yes; then : { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: gettext not supported on this platform. disabling." >&5 $as_echo "$as_me: WARNING: gettext not supported on this platform. disabling." >&2;} mu_cv_with_gettext=no else if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}msginit", so it can be a program name with args. set dummy ${ac_tool_prefix}msginit; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_MSGINIT+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$MSGINIT"; then ac_cv_prog_MSGINIT="$MSGINIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_MSGINIT="${ac_tool_prefix}msginit" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi MSGINIT=$ac_cv_prog_MSGINIT if test -n "$MSGINIT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MSGINIT" >&5 $as_echo "$MSGINIT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_MSGINIT"; then ac_ct_MSGINIT=$MSGINIT # Extract the first word of "msginit", so it can be a program name with args. set dummy msginit; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_MSGINIT+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_MSGINIT"; then ac_cv_prog_ac_ct_MSGINIT="$ac_ct_MSGINIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_MSGINIT="msginit" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_MSGINIT=$ac_cv_prog_ac_ct_MSGINIT if test -n "$ac_ct_MSGINIT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MSGINIT" >&5 $as_echo "$ac_ct_MSGINIT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_MSGINIT" = x; then MSGINIT=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac MSGINIT=$ac_ct_MSGINIT fi else MSGINIT="$ac_cv_prog_MSGINIT" fi XGETTEXT_ARGS="-C -k_ -kP_:1,2 -s" acm_val_list="\$(LIBINTL)" acm_val_rev_list="" for val in $acm_val_list; do acm_val_rev_list="$val $acm_val_rev_list" done for val in $acm_val_rev_list; do case " ${I18N_LIBS} " in #( *" $val "*) : ;; #( *) : I18N_LIBS="$val${I18N_LIBS:+ $I18N_LIBS}" ;; esac done acm_val_list="$LIBINTL" acm_val_rev_list="" for val in $acm_val_list; do acm_val_rev_list="$val $acm_val_rev_list" done for val in $acm_val_rev_list; do case " ${LIBS} " in #( *" $val "*) : ;; #( *) : LIBS="$val${LIBS:+ $LIBS}" ;; esac done fi fi cat >>confdefs.h <<_ACEOF #define EM_USE_GETTEXT $(test "$mu_cv_with_gettext" != yes)$? _ACEOF am_save_CPPFLAGS="$CPPFLAGS" for element in $INCICONV; do haveit= for x in $CPPFLAGS; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for iconv" >&5 $as_echo_n "checking for iconv... " >&6; } if ${am_cv_func_iconv+:} false; then : $as_echo_n "(cached) " >&6 else am_cv_func_iconv="no, consider installing GNU libiconv" am_cv_lib_iconv=no cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : am_cv_func_iconv=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test "$am_cv_func_iconv" != yes; then am_save_LIBS="$LIBS" LIBS="$LIBS $LIBICONV" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : am_cv_lib_iconv=yes am_cv_func_iconv=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS="$am_save_LIBS" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_func_iconv" >&5 $as_echo "$am_cv_func_iconv" >&6; } if test "$am_cv_func_iconv" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working iconv" >&5 $as_echo_n "checking for working iconv... 
" >&6; } if ${am_cv_func_iconv_works+:} false; then : $as_echo_n "(cached) " >&6 else am_save_LIBS="$LIBS" if test $am_cv_lib_iconv = yes; then LIBS="$LIBS $LIBICONV" fi am_cv_func_iconv_works=no for ac_iconv_const in '' 'const'; do if test "$cross_compiling" = yes; then : case "$host_os" in aix* | hpux*) am_cv_func_iconv_works="guessing no" ;; *) am_cv_func_iconv_works="guessing yes" ;; esac else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #ifndef ICONV_CONST # define ICONV_CONST $ac_iconv_const #endif int main () { int result = 0; /* Test against AIX 5.1 bug: Failures are not distinguishable from successful returns. */ { iconv_t cd_utf8_to_88591 = iconv_open ("ISO8859-1", "UTF-8"); if (cd_utf8_to_88591 != (iconv_t)(-1)) { static ICONV_CONST char input[] = "\342\202\254"; /* EURO SIGN */ char buf[10]; ICONV_CONST char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_utf8_to_88591, &inptr, &inbytesleft, &outptr, &outbytesleft); if (res == 0) result |= 1; iconv_close (cd_utf8_to_88591); } } /* Test against Solaris 10 bug: Failures are not distinguishable from successful returns. */ { iconv_t cd_ascii_to_88591 = iconv_open ("ISO8859-1", "646"); if (cd_ascii_to_88591 != (iconv_t)(-1)) { static ICONV_CONST char input[] = "\263"; char buf[10]; ICONV_CONST char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_ascii_to_88591, &inptr, &inbytesleft, &outptr, &outbytesleft); if (res == 0) result |= 2; iconv_close (cd_ascii_to_88591); } } /* Test against AIX 6.1..7.1 bug: Buffer overrun. */ { iconv_t cd_88591_to_utf8 = iconv_open ("UTF-8", "ISO-8859-1"); if (cd_88591_to_utf8 != (iconv_t)(-1)) { static ICONV_CONST char input[] = "\304"; static char buf[2] = { (char)0xDE, (char)0xAD }; ICONV_CONST char *inptr = input; size_t inbytesleft = 1; char *outptr = buf; size_t outbytesleft = 1; size_t res = iconv (cd_88591_to_utf8, &inptr, &inbytesleft, &outptr, &outbytesleft); if (res != (size_t)(-1) || outptr - buf > 1 || buf[1] != (char)0xAD) result |= 4; iconv_close (cd_88591_to_utf8); } } #if 0 /* This bug could be worked around by the caller. */ /* Test against HP-UX 11.11 bug: Positive return value instead of 0. */ { iconv_t cd_88591_to_utf8 = iconv_open ("utf8", "iso88591"); if (cd_88591_to_utf8 != (iconv_t)(-1)) { static ICONV_CONST char input[] = "\304rger mit b\366sen B\374bchen ohne Augenma\337"; char buf[50]; ICONV_CONST char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_88591_to_utf8, &inptr, &inbytesleft, &outptr, &outbytesleft); if ((int)res > 0) result |= 8; iconv_close (cd_88591_to_utf8); } } #endif /* Test against HP-UX 11.11 bug: No converter from EUC-JP to UTF-8 is provided. */ { /* Try standardized names. */ iconv_t cd1 = iconv_open ("UTF-8", "EUC-JP"); /* Try IRIX, OSF/1 names. */ iconv_t cd2 = iconv_open ("UTF-8", "eucJP"); /* Try AIX names. */ iconv_t cd3 = iconv_open ("UTF-8", "IBM-eucJP"); /* Try HP-UX names. 
*/ iconv_t cd4 = iconv_open ("utf8", "eucJP"); if (cd1 == (iconv_t)(-1) && cd2 == (iconv_t)(-1) && cd3 == (iconv_t)(-1) && cd4 == (iconv_t)(-1)) result |= 16; if (cd1 != (iconv_t)(-1)) iconv_close (cd1); if (cd2 != (iconv_t)(-1)) iconv_close (cd2); if (cd3 != (iconv_t)(-1)) iconv_close (cd3); if (cd4 != (iconv_t)(-1)) iconv_close (cd4); } return result; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : am_cv_func_iconv_works=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi test "$am_cv_func_iconv_works" = no || break done LIBS="$am_save_LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_func_iconv_works" >&5 $as_echo "$am_cv_func_iconv_works" >&6; } case "$am_cv_func_iconv_works" in *no) am_func_iconv=no am_cv_lib_iconv=no ;; *) am_func_iconv=yes ;; esac else am_func_iconv=no am_cv_lib_iconv=no fi if test "$am_func_iconv" = yes; then $as_echo "#define HAVE_ICONV 1" >>confdefs.h fi if test "$am_cv_lib_iconv" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libiconv" >&5 $as_echo_n "checking how to link with libiconv... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBICONV" >&5 $as_echo "$LIBICONV" >&6; } else CPPFLAGS="$am_save_CPPFLAGS" LIBICONV= LTLIBICONV= fi if test "$am_cv_func_iconv" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for iconv declaration" >&5 $as_echo_n "checking for iconv declaration... " >&6; } if ${am_cv_proto_iconv+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include extern #ifdef __cplusplus "C" #endif #if defined(__STDC__) || defined(_MSC_VER) || defined(__cplusplus) size_t iconv (iconv_t cd, char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft); #else size_t iconv(); #endif int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : am_cv_proto_iconv_arg1="" else am_cv_proto_iconv_arg1="const" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext am_cv_proto_iconv="extern size_t iconv (iconv_t cd, $am_cv_proto_iconv_arg1 char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);" fi am_cv_proto_iconv=`echo "$am_cv_proto_iconv" | tr -s ' ' | sed -e 's/( /(/'` { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_proto_iconv" >&5 $as_echo " $am_cv_proto_iconv" >&6; } else am_cv_proto_iconv_arg1="" fi cat >>confdefs.h <<_ACEOF #define ICONV_CONST $am_cv_proto_iconv_arg1 _ACEOF if test "$mu_cv_with_iconv" != no; then : acm_save__0_CPPFLAGS=$CPPFLAGS CPPFLAGS=$acm_save__0_CPPFLAGS if test "$am_cv_func_iconv" = yes; then : case $host in *-*-cygwin* | *-*-mingw32* ) mu_path_iconv=iconv ;; * ) # Extract the first word of "iconv", so it can be a program name with args. set dummy iconv; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_mu_path_iconv+:} false; then : $as_echo_n "(cached) " >&6 else case $mu_path_iconv in [\\/]* | ?:[\\/]*) ac_cv_path_mu_path_iconv="$mu_path_iconv" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
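# --- Editorial sketch (illustrative only, not part of the generated script).
# The ICONV_CONST probe earlier in this block re-declares iconv() with
# non-const "char **" input parameters next to the system headers: if that
# redeclaration compiles cleanly the installed prototype is non-const and
# ICONV_CONST stays empty, and if it conflicts ICONV_CONST is defined to
# "const".  A condensed, hypothetical rendering (the real probe above also
# guards for C++ and pre-ANSI compilers, which this omits):
mu_sketch_iconv_const () {
  cat > conftest-proto.c <<'EOF'
#include <stdlib.h>
#include <iconv.h>
extern size_t iconv (iconv_t cd, char **inbuf, size_t *inbytesleft,
                     char **outbuf, size_t *outbytesleft);
int main (void) { return 0; }
EOF
  if ${CC-cc} -c conftest-proto.c -o conftest-proto.o >/dev/null 2>&1; then
    echo ""                  # prototype is non-const
  else
    echo "const"             # conflicting declaration; needs const
  fi
  rm -f conftest-proto.c conftest-proto.o
}
# End of editorial sketch. --------------------------------------------------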
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_mu_path_iconv="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi mu_path_iconv=$ac_cv_path_mu_path_iconv if test -n "$mu_path_iconv"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_path_iconv" >&5 $as_echo "$mu_path_iconv" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi ;; esac if test -n "$mu_path_iconv"; then : cat >>confdefs.h <<_ACEOF #define ICONV_UTIL_PATH "$mu_path_iconv" _ACEOF fi cat >>confdefs.h <<_ACEOF #define HAVE_ICONV_CONST $(test "$am_cv_proto_iconv_arg1" != const)$? _ACEOF acm_val_list="\$(LIBICONV)" acm_val_rev_list="" for val in $acm_val_list; do acm_val_rev_list="$val $acm_val_rev_list" done for val in $acm_val_rev_list; do case " ${I18N_LIBS} " in #( *" $val "*) : ;; #( *) : I18N_LIBS="$val${I18N_LIBS:+ $I18N_LIBS}" ;; esac done acm_val_list="$LIBICONV" acm_val_rev_list="" for val in $acm_val_list; do acm_val_rev_list="$val $acm_val_rev_list" done for val in $acm_val_rev_list; do case " ${LIBS} " in #( *" $val "*) : ;; #( *) : LIBS="$val${LIBS:+ $LIBS}" ;; esac done fi fi if ${mu_suppress_lsan_iconv+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Restoring LSan after iconv ..." >&5 $as_echo "$as_me: Restoring LSan after iconv ..." >&6;} { ASAN_OPTIONS=; unset ASAN_OPTIONS;} if test "x$mu_suppress_lsan_iconv" = xyes; then : else ASAN_OPTIONS=$mu_suppress_lsan_iconv export ASAN_OPTIONS fi { mu_suppress_lsan_iconv=; unset mu_suppress_lsan_iconv;} else if test "$mu_lsan_enabled" = yes; then : as_fn_error $? "LSan restore requested, but suppression for 'iconv' is not active" "$LINENO" 5 fi fi mu_expect_udev=no case $host in *-*-linux* ) mu_expect_udev=yes ;; esac # Check whether --with-udev was given. if test "${with_udev+set}" = set; then : withval=$with_udev; mu_cv_with_udev=$withval else mu_cv_with_udev=$mu_expect_udev fi if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args. set dummy ${ac_tool_prefix}pkg-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_PKG_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PKG_CONFIG=$ac_cv_path_PKG_CONFIG if test -n "$PKG_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5 $as_echo "$PKG_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_PKG_CONFIG"; then ac_pt_PKG_CONFIG=$PKG_CONFIG # Extract the first word of "pkg-config", so it can be a program name with args. 
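# --- Editorial sketch (illustrative only, not part of the generated script).
# pkg-config is located here (with and without $ac_tool_prefix) because the
# udev and systemd checks further down ask it for installation paths
# (udevdir, systemdsystemunitdir) and library names, always falling back to
# a hard-coded default when the query fails.  That query-with-fallback
# pattern, reduced to a hypothetical helper:
mu_sketch_pkg_var () {
  # Usage: mu_sketch_pkg_var udev udevdir /lib/udev
  mu_sketch_val=`${PKG_CONFIG-pkg-config} --variable="$2" "$1" 2>/dev/null`
  test -n "$mu_sketch_val" || mu_sketch_val=$3
  echo "$mu_sketch_val"
}
# For example:  UDEV_DIR=`mu_sketch_pkg_var udev udevdir /lib/udev`
# End of editorial sketch. --------------------------------------------------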
set dummy pkg-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_ac_pt_PKG_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $ac_pt_PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG if test -n "$ac_pt_PKG_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_PKG_CONFIG" >&5 $as_echo "$ac_pt_PKG_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_PKG_CONFIG" = x; then PKG_CONFIG="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac PKG_CONFIG=$ac_pt_PKG_CONFIG fi else PKG_CONFIG="$ac_cv_path_PKG_CONFIG" fi fi if test -n "$PKG_CONFIG"; then _pkg_min_version=0.9.0 { $as_echo "$as_me:${as_lineno-$LINENO}: checking pkg-config is at least version $_pkg_min_version" >&5 $as_echo_n "checking pkg-config is at least version $_pkg_min_version... " >&6; } if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PKG_CONFIG="" fi fi if test "$mu_cv_with_udev" != no; then : if test "$mu_cv_enable_shared" = no; then : { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: libudev does not support static linking" >&5 $as_echo "$as_me: WARNING: libudev does not support static linking" >&2;} mu_cv_with_udev="rules only" elif test "$mu_cv_with_udev" != "rules only"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for udev_new in -ludev" >&5 $as_echo_n "checking for udev_new in -ludev... " >&6; } if ${ac_cv_lib_udev_udev_new+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ludev $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char udev_new (); int main () { return udev_new (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_udev_udev_new=yes else ac_cv_lib_udev_udev_new=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_udev_udev_new" >&5 $as_echo "$ac_cv_lib_udev_udev_new" >&6; } if test "x$ac_cv_lib_udev_udev_new" = xyes; then : $as_echo "#define HAVE_LIBUDEV 1" >>confdefs.h UDEV_CPPFLAGS= if test "$mu_cv_enable_shared" = no; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for UDEV_LIBS" >&5 $as_echo_n "checking for UDEV_LIBS... 
" >&6; } if test -n "$UDEV_LIBS"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: '$UDEV_LIBS' (from environment)" >&5 $as_echo "'$UDEV_LIBS' (from environment)" >&6; } elif test -z "$PKG_CONFIG"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: 'udev' (default value)" >&5 $as_echo "'udev' (default value)" >&6; } UDEV_LIBS="udev" elif UDEV_LIBS=$($PKG_CONFIG --static --libs-only-l "libudev" 2>/dev/null); then : UDEV_LIBS=$(echo "$UDEV_LIBS" | sed 's/^-l//; s/[[:space:]]-l/ /g') { $as_echo "$as_me:${as_lineno-$LINENO}: result: '$UDEV_LIBS' (from libudev.pc $($PKG_CONFIG --modversion libudev 2>/dev/null))" >&5 $as_echo "'$UDEV_LIBS' (from libudev.pc $($PKG_CONFIG --modversion libudev 2>/dev/null))" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: 'udev' (default value)" >&5 $as_echo "'udev' (default value)" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'pkg-config --static --libs-only-l libudev' failed" >&5 $as_echo "$as_me: WARNING: 'pkg-config --static --libs-only-l libudev' failed" >&2;} UDEV_LIBS="udev" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for UDEV_LIBS" >&5 $as_echo_n "checking for UDEV_LIBS... " >&6; } if test -n "$UDEV_LIBS"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: '$UDEV_LIBS' (from environment)" >&5 $as_echo "'$UDEV_LIBS' (from environment)" >&6; } elif test -z "$PKG_CONFIG"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: 'udev' (default value)" >&5 $as_echo "'udev' (default value)" >&6; } UDEV_LIBS="udev" elif UDEV_LIBS=$($PKG_CONFIG --libs-only-l "libudev" 2>/dev/null); then : UDEV_LIBS=$(echo "$UDEV_LIBS" | sed 's/^-l//; s/[[:space:]]-l/ /g') { $as_echo "$as_me:${as_lineno-$LINENO}: result: '$UDEV_LIBS' (from libudev.pc $($PKG_CONFIG --modversion libudev 2>/dev/null))" >&5 $as_echo "'$UDEV_LIBS' (from libudev.pc $($PKG_CONFIG --modversion libudev 2>/dev/null))" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: 'udev' (default value)" >&5 $as_echo "'udev' (default value)" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'pkg-config --libs-only-l libudev' failed" >&5 $as_echo "$as_me: WARNING: 'pkg-config --libs-only-l libudev' failed" >&2;} UDEV_LIBS="udev" fi fi mu_cv_with_udev=yes acm_save__0_LIBS=$LIBS LIBS="${LIBS:+$LIBS }-l$UDEV_LIBS" for ac_func in udev_device_get_tags_list_entry udev_device_get_sysattr_list_entry do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done LIBS=$acm_save__0_LIBS else if test "$mu_cv_with_udev" != auto; then : { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: libudev not found" >&5 $as_echo "$as_me: WARNING: libudev not found" >&2;} mu_missing_deps="${mu_missing_deps:+$mu_missing_deps, }libudev-dev (or configure --without-udev)" fi mu_cv_with_udev=no fi fi fi if test "$mu_cv_with_udev" != no && test -z "$UDEV_RULES_DIR"; then : if test "$prefix" = "/usr" || { test "$prefix" = "NONE" && test "$ac_default_prefix" = "/usr" ; }; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for UDEV_DIR" >&5 $as_echo_n "checking for UDEV_DIR... 
" >&6; } if test -n "$UDEV_DIR"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: '$UDEV_DIR' (from environment)" >&5 $as_echo "'$UDEV_DIR' (from environment)" >&6; } elif test -z "$PKG_CONFIG"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: '/lib/udev' (default value)" >&5 $as_echo "'/lib/udev' (default value)" >&6; } UDEV_DIR="/lib/udev" elif UDEV_DIR=$($PKG_CONFIG --variable="udevdir" "udev" 2>/dev/null); then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: '$UDEV_DIR' (from udev.pc $($PKG_CONFIG --modversion udev 2>/dev/null))" >&5 $as_echo "'$UDEV_DIR' (from udev.pc $($PKG_CONFIG --modversion udev 2>/dev/null))" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: '/lib/udev' (default value)" >&5 $as_echo "'/lib/udev' (default value)" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'pkg-config --variable=\"udevdir\" udev' failed" >&5 $as_echo "$as_me: WARNING: 'pkg-config --variable=\"udevdir\" udev' failed" >&2;} UDEV_DIR="/lib/udev" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for UDEV_DIR" >&5 $as_echo_n "checking for UDEV_DIR... " >&6; } UDEV_DIR="/etc/udev" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $UDEV_DIR (for prefix=$EXP_PREFIX)" >&5 $as_echo "$UDEV_DIR (for prefix=$EXP_PREFIX)" >&6; } fi UDEV_RULES_DIR="$UDEV_DIR/rules.d" fi mu_expect_systemd=no case $host in *-*-linux* ) mu_expect_systemd=yes ;; esac # Check whether --enable-systemd was given. if test "${enable_systemd+set}" = set; then : enableval=$enable_systemd; mu_cv_enable_systemd=$enableval else mu_cv_enable_systemd=$mu_expect_systemd fi if test "$mu_cv_enable_systemd" = yes; then : if test "$prefix" = "/usr" || { test "$prefix" = "NONE" && test "$ac_default_prefix" = "/usr" ; }; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SYSTEMD_UNIT_DIR" >&5 $as_echo_n "checking for SYSTEMD_UNIT_DIR... " >&6; } if test -n "$SYSTEMD_UNIT_DIR"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: '$SYSTEMD_UNIT_DIR' (from environment)" >&5 $as_echo "'$SYSTEMD_UNIT_DIR' (from environment)" >&6; } elif test -z "$PKG_CONFIG"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: '/lib/systemd/system' (default value)" >&5 $as_echo "'/lib/systemd/system' (default value)" >&6; } SYSTEMD_UNIT_DIR="/lib/systemd/system" elif SYSTEMD_UNIT_DIR=$($PKG_CONFIG --variable="systemdsystemunitdir" "systemd" 2>/dev/null); then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: '$SYSTEMD_UNIT_DIR' (from systemd.pc $($PKG_CONFIG --modversion systemd 2>/dev/null))" >&5 $as_echo "'$SYSTEMD_UNIT_DIR' (from systemd.pc $($PKG_CONFIG --modversion systemd 2>/dev/null))" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: '/lib/systemd/system' (default value)" >&5 $as_echo "'/lib/systemd/system' (default value)" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'pkg-config --variable=\"systemdsystemunitdir\" systemd' failed" >&5 $as_echo "$as_me: WARNING: 'pkg-config --variable=\"systemdsystemunitdir\" systemd' failed" >&2;} SYSTEMD_UNIT_DIR="/lib/systemd/system" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SYSTEMD_UNIT_DIR" >&5 $as_echo_n "checking for SYSTEMD_UNIT_DIR... " >&6; } SYSTEMD_UNIT_DIR="$EXP_LIBDIR/systemd/system" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $SYSTEMD_UNIT_DIR (for prefix=$EXP_PREFIX)" >&5 $as_echo "$SYSTEMD_UNIT_DIR (for prefix=$EXP_PREFIX)" >&6; } fi fi cat >>confdefs.h <<_ACEOF #define EM_USE_NOTIFY_SOCKET $(test "$mu_cv_enable_systemd" != yes)$? 
_ACEOF mu_expect_sysctl=no case $host in *-*-linux* ) mu_expect_sysctl=yes ;; esac # Check whether --enable-sysctl was given. if test "${enable_sysctl+set}" = set; then : enableval=$enable_sysctl; mu_cv_enable_sysctl=$enableval else mu_cv_enable_sysctl=$mu_expect_sysctl fi if test "$mu_cv_enable_sysctl" = yes; then : if test "$prefix" = "/usr" || { test "$prefix" = "NONE" && test "$ac_default_prefix" = "/usr" ; }; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SYSCTL_DIR" >&5 $as_echo_n "checking for SYSCTL_DIR... " >&6; } if test -n "$SYSCTL_DIR"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: '$SYSCTL_DIR' (from environment)" >&5 $as_echo "'$SYSCTL_DIR' (from environment)" >&6; } elif test -z "$PKG_CONFIG"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: '$EXP_LIBDIR/sysctl.d' (default value)" >&5 $as_echo "'$EXP_LIBDIR/sysctl.d' (default value)" >&6; } SYSCTL_DIR="$EXP_LIBDIR/sysctl.d" elif SYSCTL_DIR=$($PKG_CONFIG --variable="sysctldir" "systemd" 2>/dev/null); then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: '$SYSCTL_DIR' (from systemd.pc $($PKG_CONFIG --modversion systemd 2>/dev/null))" >&5 $as_echo "'$SYSCTL_DIR' (from systemd.pc $($PKG_CONFIG --modversion systemd 2>/dev/null))" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: '$EXP_LIBDIR/sysctl.d' (default value)" >&5 $as_echo "'$EXP_LIBDIR/sysctl.d' (default value)" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'pkg-config --variable=\"sysctldir\" systemd' failed" >&5 $as_echo "$as_me: WARNING: 'pkg-config --variable=\"sysctldir\" systemd' failed" >&2;} SYSCTL_DIR="$EXP_LIBDIR/sysctl.d" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SYSCTL_DIR" >&5 $as_echo_n "checking for SYSCTL_DIR... " >&6; } SYSCTL_DIR="$EXP_LIBDIR/sysctl.d" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $SYSCTL_DIR (for prefix=$EXP_PREFIX)" >&5 $as_echo "$SYSCTL_DIR (for prefix=$EXP_PREFIX)" >&6; } fi fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu case $host in *-*-linux* ) ;; *-*-cygwin* | *-*-mingw32* ) if test -z "$SEEDD_CONTROL_SOCKET"; then : SEEDD_CONTROL_SOCKET=tcp:localhost:56789 fi bb_cv_env_winver=0x0600 bb_cv_env__win32_winnt=0x0600 cat >>confdefs.h <<_ACEOF #define WINVER $bb_cv_env_winver _ACEOF cat >>confdefs.h <<_ACEOF #define _WIN32_WINNT $bb_cv_env__win32_winnt _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: using WINVER = '$bb_cv_env_winver', _WIN32_WINNT = '$bb_cv_env__win32_winnt'" >&5 $as_echo "$as_me: using WINVER = '$bb_cv_env_winver', _WIN32_WINNT = '$bb_cv_env__win32_winnt'" >&6;} $as_echo "#define _GNU_SOURCE 1" >>confdefs.h ;; *-*-openbsd* ) if test -z "$THREAD_STACK_SIZE"; then : THREAD_STACK_SIZE=8192 fi $as_echo "#define HAVE_BROKEN_STDIO_LOCKING 1" >>confdefs.h ;; *-*-freebsd* ) if test -z "$THREAD_STACK_SIZE"; then : THREAD_STACK_SIZE=8192 fi ;; *-*-darwin* ) if test -z "$THREAD_STACK_SIZE"; then : THREAD_STACK_SIZE=8192 fi ;; esac if test -z "$SEEDD_CONTROL_SOCKET"; then : SEEDD_CONTROL_SOCKET="$SYSTEM_RUNDIR/bit-babbler/seedd.socket" fi if test -n "$SEEDD_CONTROL_SOCKET"; then : cat >>confdefs.h <<_ACEOF #define SEEDD_CONTROL_SOCKET "$SEEDD_CONTROL_SOCKET" _ACEOF fi if test -n "$THREAD_STACK_SIZE"; then : cat >>confdefs.h <<_ACEOF #define THREAD_STACK_SIZE $THREAD_STACK_SIZE _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is 
bigendian" >&5 $as_echo_n "checking whether byte ordering is bigendian... " >&6; } if ${ac_cv_c_bigendian+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_c_bigendian=unknown # See if we're dealing with a universal compiler. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifndef __APPLE_CC__ not a universal capable compiler #endif typedef int dummy; _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : # Check for potential -arch flags. It is not universal unless # there are at least two -arch flags with different values. ac_arch= ac_prev= for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do if test -n "$ac_prev"; then case $ac_word in i?86 | x86_64 | ppc | ppc64) if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then ac_arch=$ac_word else ac_cv_c_bigendian=universal break fi ;; esac ac_prev= elif test "x$ac_word" = "x-arch"; then ac_prev=arch fi done fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_c_bigendian = unknown; then # See if sys/param.h defines the BYTE_ORDER macro. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { #if ! (defined BYTE_ORDER && defined BIG_ENDIAN \ && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \ && LITTLE_ENDIAN) bogus endian macros #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : # It does; now see whether it defined to BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { #if BYTE_ORDER != BIG_ENDIAN not big endian #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_c_bigendian=yes else ac_cv_c_bigendian=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # See if defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris). cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN) bogus endian macros #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : # It does; now see whether it defined to _BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #ifndef _BIG_ENDIAN not big endian #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_c_bigendian=yes else ac_cv_c_bigendian=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # Compile a test program. if test "$cross_compiling" = yes; then : # Try to guess by grepping values from an object file. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; int use_ascii (int i) { return ascii_mm[i] + ascii_ii[i]; } short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; int use_ebcdic (int i) { return ebcdic_mm[i] + ebcdic_ii[i]; } extern int foo; int main () { return use_ascii (foo) == use_ebcdic (foo); ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then ac_cv_c_bigendian=yes fi if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then if test "$ac_cv_c_bigendian" = unknown; then ac_cv_c_bigendian=no else # finding both strings is unlikely to happen, but who knows? ac_cv_c_bigendian=unknown fi fi fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { /* Are we little or big endian? From Harbison&Steele. */ union { long int l; char c[sizeof (long int)]; } u; u.l = 1; return u.c[sizeof (long int) - 1] == 1; ; return 0; } _ACEOF if ac_fn_cxx_try_run "$LINENO"; then : ac_cv_c_bigendian=no else ac_cv_c_bigendian=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 $as_echo "$ac_cv_c_bigendian" >&6; } case $ac_cv_c_bigendian in #( yes) $as_echo "#define WORDS_BIGENDIAN 1" >>confdefs.h ;; #( no) ;; #( universal) $as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h ;; #( *) as_fn_error $? "unknown endianness presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; esac for ac_func in vasprintf do : ac_fn_cxx_check_func "$LINENO" "vasprintf" "ac_cv_func_vasprintf" if test "x$ac_cv_func_vasprintf" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_VASPRINTF 1 _ACEOF fi done for ac_func in gettimeofday localtime_r gmtime_r timegm clock_gettime do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_cxx_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done ac_fn_cxx_check_decl "$LINENO" "SIGRTMIN" "ac_cv_have_decl_SIGRTMIN" "#include " if test "x$ac_cv_have_decl_SIGRTMIN" = xyes; then : ac_have_decl=1 else ac_have_decl=0 fi cat >>confdefs.h <<_ACEOF #define HAVE_DECL_SIGRTMIN $ac_have_decl _ACEOF ac_fn_cxx_check_decl "$LINENO" "LOG_MAKEPRI" "ac_cv_have_decl_LOG_MAKEPRI" "#include " if test "x$ac_cv_have_decl_LOG_MAKEPRI" = xyes; then : ac_have_decl=1 else ac_have_decl=0 fi cat >>confdefs.h <<_ACEOF #define HAVE_DECL_LOG_MAKEPRI $ac_have_decl _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking for abi::__forced_unwind" >&5 $as_echo_n "checking for abi::__forced_unwind... " >&6; } if ${mu_cv_type_forced_unwind+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { void f(const abi::__forced_unwind&); ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_type_forced_unwind=yes else mu_cv_type_forced_unwind=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_type_forced_unwind" >&5 $as_echo "$mu_cv_type_forced_unwind" >&6; } if test "$mu_cv_type_forced_unwind" = yes; then : $as_echo "#define HAVE_ABI_FORCED_UNWIND 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for namespace abi" >&5 $as_echo_n "checking for namespace abi... " >&6; } if ${mu_cv_namespace_abi+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { using namespace abi; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_namespace_abi=yes else mu_cv_namespace_abi=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_namespace_abi" >&5 $as_echo "$mu_cv_namespace_abi" >&6; } fi if test "$mu_cv_namespace_abi" = yes; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for namespace abi alias to __cxxabiv1" >&5 $as_echo_n "checking for namespace abi alias to __cxxabiv1... " >&6; } if ${mu_cv_namespace_alias_abi+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include namespace __cxxabiv1 { struct xx {}; } int main () { abi::xx x; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_namespace_alias_abi=yes else mu_cv_namespace_alias_abi=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_namespace_alias_abi" >&5 $as_echo "$mu_cv_namespace_alias_abi" >&6; } fi if test "$mu_cv_namespace_alias_abi" = yes; then : $as_echo "#define HAVE_ABI_ALIAS_TO_CXXABIV1 1" >>confdefs.h fi acm_save_ACM_FUNC_PTHREAD_SETNAME_CPPFLAGS=$CPPFLAGS CPPFLAGS="${CPPFLAGS:+$CPPFLAGS }$PTHREAD_CPPFLAGS" acm_save_ACM_FUNC_PTHREAD_SETNAME_LDFLAGS=$LDFLAGS LDFLAGS="${LDFLAGS:+$LDFLAGS }$PTHREAD_LDFLAGS" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU pthread_setname_np" >&5 $as_echo_n "checking for GNU pthread_setname_np... " >&6; } if ${mu_cv_func_gnu_pthread_setname_np+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { pthread_setname_np(pthread_self(),"x"); ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : mu_cv_func_gnu_pthread_setname_np=yes else mu_cv_func_gnu_pthread_setname_np=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_func_gnu_pthread_setname_np" >&5 $as_echo "$mu_cv_func_gnu_pthread_setname_np" >&6; } if test "$mu_cv_func_gnu_pthread_setname_np" = yes; then : $as_echo "#define HAVE_PTHREAD_SETNAME_NP_GNU 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for MacOS pthread_setname_np" >&5 $as_echo_n "checking for MacOS pthread_setname_np... " >&6; } if ${mu_cv_func_mac_pthread_setname_np+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { pthread_setname_np("x"); ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : mu_cv_func_mac_pthread_setname_np=yes else mu_cv_func_mac_pthread_setname_np=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_func_mac_pthread_setname_np" >&5 $as_echo "$mu_cv_func_mac_pthread_setname_np" >&6; } fi if test "$mu_cv_func_mac_pthread_setname_np" = yes; then : $as_echo "#define HAVE_PTHREAD_SETNAME_NP_MAC 1" >>confdefs.h fi if test "$mu_cv_func_gnu_pthread_setname_np" != yes && test "$mu_cv_func_mac_pthread_setname_np" != yes; then : for ac_func in pthread_set_name_np do : ac_fn_cxx_check_func "$LINENO" "pthread_set_name_np" "ac_cv_func_pthread_set_name_np" if test "x$ac_cv_func_pthread_set_name_np" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_PTHREAD_SET_NAME_NP 1 _ACEOF fi done fi LDFLAGS=$acm_save_ACM_FUNC_PTHREAD_SETNAME_LDFLAGS CPPFLAGS=$acm_save_ACM_FUNC_PTHREAD_SETNAME_CPPFLAGS for ac_header in unordered_map do : ac_fn_cxx_check_header_mongrel "$LINENO" "unordered_map" "ac_cv_header_unordered_map" "$ac_includes_default" if test "x$ac_cv_header_unordered_map" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_UNORDERED_MAP 1 _ACEOF else for ac_header in tr1/unordered_map do : ac_fn_cxx_check_header_mongrel "$LINENO" "tr1/unordered_map" "ac_cv_header_tr1_unordered_map" "$ac_includes_default" if test "x$ac_cv_header_tr1_unordered_map" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_TR1_UNORDERED_MAP 1 _ACEOF else for ac_header in ext/hash_map do : ac_fn_cxx_check_header_mongrel "$LINENO" "ext/hash_map" "ac_cv_header_ext_hash_map" "$ac_includes_default" if test "x$ac_cv_header_ext_hash_map" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_EXT_HASH_MAP 1 _ACEOF fi done fi done fi done { $as_echo "$as_me:${as_lineno-$LINENO}: Including EM_PUSH/POP_DIAGNOSTIC preprocessor macros ..." >&5 $as_echo "$as_me: Including EM_PUSH/POP_DIAGNOSTIC preprocessor macros ..." >&6;} acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS=$CXXFLAGS if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" else if test "x$mu_cv_CXX_flag__Werror_attributes" = xno; then : else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=attributes" >&5 $as_echo_n "checking if $CXX supports -Werror=attributes... " >&6; } if ${mu_cv_CXX_flag__Werror_attributes+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Werror_attributes=yes else mu_cv_CXX_flag__Werror_attributes=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Werror_attributes" >&5 $as_echo "$mu_cv_CXX_flag__Werror_attributes" >&6; } if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Werror=attributes" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports function attribute ((noreturn))" >&5 $as_echo_n "checking if $CXX supports function attribute ((noreturn))... " >&6; } if ${mu_cv_cxx_function_attr_noreturn+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int f() __attribute__((noreturn)); int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_cxx_function_attr_noreturn=yes else mu_cv_cxx_function_attr_noreturn=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_cxx_function_attr_noreturn" >&5 $as_echo "$mu_cv_cxx_function_attr_noreturn" >&6; } if test "x$mu_cv_cxx_function_attr_noreturn" = xyes; then : acm_attr_defn="__attribute__((noreturn))" else acm_attr_defn="" fi cat >>confdefs.h <<_ACEOF #define BB_NORETURN $acm_attr_defn _ACEOF CXXFLAGS=$acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS=$CXXFLAGS if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" else if test "x$mu_cv_CXX_flag__Werror_attributes" = xno; then : else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=attributes" >&5 $as_echo_n "checking if $CXX supports -Werror=attributes... " >&6; } if ${mu_cv_CXX_flag__Werror_attributes+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Werror_attributes=yes else mu_cv_CXX_flag__Werror_attributes=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Werror_attributes" >&5 $as_echo "$mu_cv_CXX_flag__Werror_attributes" >&6; } if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Werror=attributes" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports function attribute ((const))" >&5 $as_echo_n "checking if $CXX supports function attribute ((const))... " >&6; } if ${mu_cv_cxx_function_attr_const+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int f() __attribute__((const)); int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_cxx_function_attr_const=yes else mu_cv_cxx_function_attr_const=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_cxx_function_attr_const" >&5 $as_echo "$mu_cv_cxx_function_attr_const" >&6; } if test "x$mu_cv_cxx_function_attr_const" = xyes; then : acm_attr_defn="__attribute__((const))" else acm_attr_defn="" fi cat >>confdefs.h <<_ACEOF #define BB_CONST $acm_attr_defn _ACEOF CXXFLAGS=$acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS=$CXXFLAGS if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" else if test "x$mu_cv_CXX_flag__Werror_attributes" = xno; then : else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=attributes" >&5 $as_echo_n "checking if $CXX supports -Werror=attributes... " >&6; } if ${mu_cv_CXX_flag__Werror_attributes+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Werror_attributes=yes else mu_cv_CXX_flag__Werror_attributes=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Werror_attributes" >&5 $as_echo "$mu_cv_CXX_flag__Werror_attributes" >&6; } if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Werror=attributes" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports function attribute ((pure))" >&5 $as_echo_n "checking if $CXX supports function attribute ((pure))... " >&6; } if ${mu_cv_cxx_function_attr_pure+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int f() __attribute__((pure)); int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_cxx_function_attr_pure=yes else mu_cv_cxx_function_attr_pure=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_cxx_function_attr_pure" >&5 $as_echo "$mu_cv_cxx_function_attr_pure" >&6; } if test "x$mu_cv_cxx_function_attr_pure" = xyes; then : acm_attr_defn="__attribute__((pure))" else acm_attr_defn="" fi cat >>confdefs.h <<_ACEOF #define BB_PURE $acm_attr_defn _ACEOF CXXFLAGS=$acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS=$CXXFLAGS if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" else if test "x$mu_cv_CXX_flag__Werror_attributes" = xno; then : else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=attributes" >&5 $as_echo_n "checking if $CXX supports -Werror=attributes... " >&6; } if ${mu_cv_CXX_flag__Werror_attributes+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Werror_attributes=yes else mu_cv_CXX_flag__Werror_attributes=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Werror_attributes" >&5 $as_echo "$mu_cv_CXX_flag__Werror_attributes" >&6; } if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Werror=attributes" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports function attribute ((cold))" >&5 $as_echo_n "checking if $CXX supports function attribute ((cold))... " >&6; } if ${mu_cv_cxx_function_attr_cold+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int f() __attribute__((cold)); int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_cxx_function_attr_cold=yes else mu_cv_cxx_function_attr_cold=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_cxx_function_attr_cold" >&5 $as_echo "$mu_cv_cxx_function_attr_cold" >&6; } if test "x$mu_cv_cxx_function_attr_cold" = xyes; then : acm_attr_defn="__attribute__((cold))" else acm_attr_defn="" fi cat >>confdefs.h <<_ACEOF #define BB_COLD $acm_attr_defn _ACEOF CXXFLAGS=$acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS=$CXXFLAGS if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" else if test "x$mu_cv_CXX_flag__Werror_attributes" = xno; then : else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=attributes" >&5 $as_echo_n "checking if $CXX supports -Werror=attributes... " >&6; } if ${mu_cv_CXX_flag__Werror_attributes+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Werror_attributes=yes else mu_cv_CXX_flag__Werror_attributes=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Werror_attributes" >&5 $as_echo "$mu_cv_CXX_flag__Werror_attributes" >&6; } if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Werror=attributes" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi fi if test "x$mu_cv_CXX_flag__Werror_unknown_sanitizers" = xyes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=unknown-sanitizers" else if test "x$mu_cv_CXX_flag__Werror_unknown_sanitizers" = xno; then : else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=unknown-sanitizers" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=unknown-sanitizers" >&5 $as_echo_n "checking if $CXX supports -Werror=unknown-sanitizers... " >&6; } if ${mu_cv_CXX_flag__Werror_unknown_sanitizers+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Werror_unknown_sanitizers=yes else mu_cv_CXX_flag__Werror_unknown_sanitizers=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Werror_unknown_sanitizers" >&5 $as_echo "$mu_cv_CXX_flag__Werror_unknown_sanitizers" >&6; } if test "x$mu_cv_CXX_flag__Werror_unknown_sanitizers" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Werror_unknown_sanitizers" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Werror=unknown-sanitizers" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi fi as_cachevar=`$as_echo "mu_cv_cxx_function_attr_no_sanitize("float-divide-by-zero")" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports function attribute ((no_sanitize(\"float-divide-by-zero\")))" >&5 $as_echo_n "checking if $CXX supports function attribute ((no_sanitize(\"float-divide-by-zero\")))... " >&6; } if eval \${$as_cachevar+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int f() __attribute__((no_sanitize("float-divide-by-zero"))); int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : eval "$as_cachevar=yes" else eval "$as_cachevar=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_cachevar"\" = x"yes"; then : acm_attr_defn="__attribute__((no_sanitize(\"float-divide-by-zero\")))" else acm_attr_defn="" fi cat >>confdefs.h <<_ACEOF #define BB_NO_SANITIZE_FLOAT_DIVIDE_BY_ZERO $acm_attr_defn _ACEOF CXXFLAGS=$acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS=$CXXFLAGS if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" else if test "x$mu_cv_CXX_flag__Werror_attributes" = xno; then : else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=attributes" >&5 $as_echo_n "checking if $CXX supports -Werror=attributes... " >&6; } if ${mu_cv_CXX_flag__Werror_attributes+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Werror_attributes=yes else mu_cv_CXX_flag__Werror_attributes=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Werror_attributes" >&5 $as_echo "$mu_cv_CXX_flag__Werror_attributes" >&6; } if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Werror=attributes" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi fi if test "x$mu_cv_CXX_flag__Werror_unknown_sanitizers" = xyes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=unknown-sanitizers" else if test "x$mu_cv_CXX_flag__Werror_unknown_sanitizers" = xno; then : else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=unknown-sanitizers" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=unknown-sanitizers" >&5 $as_echo_n "checking if $CXX supports -Werror=unknown-sanitizers... 
" >&6; } if ${mu_cv_CXX_flag__Werror_unknown_sanitizers+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Werror_unknown_sanitizers=yes else mu_cv_CXX_flag__Werror_unknown_sanitizers=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Werror_unknown_sanitizers" >&5 $as_echo "$mu_cv_CXX_flag__Werror_unknown_sanitizers" >&6; } if test "x$mu_cv_CXX_flag__Werror_unknown_sanitizers" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Werror_unknown_sanitizers" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Werror=unknown-sanitizers" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi fi as_cachevar=`$as_echo "mu_cv_cxx_function_attr_no_sanitize("unsigned-integer-overflow")" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports function attribute ((no_sanitize(\"unsigned-integer-overflow\")))" >&5 $as_echo_n "checking if $CXX supports function attribute ((no_sanitize(\"unsigned-integer-overflow\")))... " >&6; } if eval \${$as_cachevar+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int f() __attribute__((no_sanitize("unsigned-integer-overflow"))); int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : eval "$as_cachevar=yes" else eval "$as_cachevar=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_cachevar"\" = x"yes"; then : acm_attr_defn="__attribute__((no_sanitize(\"unsigned-integer-overflow\")))" else acm_attr_defn="" fi cat >>confdefs.h <<_ACEOF #define BB_NO_SANITIZE_UNSIGNED_INTEGER_OVERFLOW $acm_attr_defn _ACEOF CXXFLAGS=$acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS case $host in #( *-*-cygwin* | *-*-mingw32*) : acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS=$CXXFLAGS if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" else if test "x$mu_cv_CXX_flag__Werror_attributes" = xno; then : else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=attributes" >&5 $as_echo_n "checking if $CXX supports -Werror=attributes... " >&6; } if ${mu_cv_CXX_flag__Werror_attributes+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Werror_attributes=yes else mu_cv_CXX_flag__Werror_attributes=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Werror_attributes" >&5 $as_echo "$mu_cv_CXX_flag__Werror_attributes" >&6; } if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Werror=attributes" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi fi as_cachevar=`$as_echo "mu_cv_cxx_function_attr_format (__gnu_printf__,fmt,arg1)" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports function attribute ((format (__gnu_printf__,fmt,arg1)))" >&5 $as_echo_n "checking if $CXX supports function attribute ((format (__gnu_printf__,fmt,arg1)))... " >&6; } if eval \${$as_cachevar+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ void p(const char*,...) __attribute__((format (__gnu_printf__, 1, 2))); int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : eval "$as_cachevar=yes" else eval "$as_cachevar=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_cachevar"\" = x"yes"; then : acm_attr_defn="__attribute__((format (__gnu_printf__,fmt,arg1)))" else acm_attr_defn="" fi cat >>confdefs.h <<_ACEOF #define BB_PRINTF_FORMAT( fmt, arg1 ) $acm_attr_defn _ACEOF CXXFLAGS=$acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS ;; #( *) : acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS=$CXXFLAGS if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" else if test "x$mu_cv_CXX_flag__Werror_attributes" = xno; then : else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=attributes" >&5 $as_echo_n "checking if $CXX supports -Werror=attributes... " >&6; } if ${mu_cv_CXX_flag__Werror_attributes+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Werror_attributes=yes else mu_cv_CXX_flag__Werror_attributes=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Werror_attributes" >&5 $as_echo "$mu_cv_CXX_flag__Werror_attributes" >&6; } if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Werror=attributes" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi fi as_cachevar=`$as_echo "mu_cv_cxx_function_attr_format (__printf__,fmt,arg1)" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports function attribute ((format (__printf__,fmt,arg1)))" >&5 $as_echo_n "checking if $CXX supports function attribute ((format (__printf__,fmt,arg1)))... " >&6; } if eval \${$as_cachevar+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ void p(const char*,...) __attribute__((format (__printf__, 1, 2))); int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : eval "$as_cachevar=yes" else eval "$as_cachevar=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_cachevar"\" = x"yes"; then : acm_attr_defn="__attribute__((format (__printf__,fmt,arg1)))" else acm_attr_defn="" fi cat >>confdefs.h <<_ACEOF #define BB_PRINTF_FORMAT( fmt, arg1 ) $acm_attr_defn _ACEOF CXXFLAGS=$acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS ;; esac case $host in #( *-*-cygwin* | *-*-mingw32*) : acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS=$CXXFLAGS if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" else if test "x$mu_cv_CXX_flag__Werror_attributes" = xno; then : else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=attributes" >&5 $as_echo_n "checking if $CXX supports -Werror=attributes... " >&6; } if ${mu_cv_CXX_flag__Werror_attributes+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Werror_attributes=yes else mu_cv_CXX_flag__Werror_attributes=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Werror_attributes" >&5 $as_echo "$mu_cv_CXX_flag__Werror_attributes" >&6; } if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Werror=attributes" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi fi as_cachevar=`$as_echo "mu_cv_cxx_function_attr_format (__gnu_strftime__,fmt,0)" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports function attribute ((format (__gnu_strftime__,fmt,0)))" >&5 $as_echo_n "checking if $CXX supports function attribute ((format (__gnu_strftime__,fmt,0)))... " >&6; } if eval \${$as_cachevar+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ void p(const char*,...) __attribute__((format (__gnu_strftime__, 1, 0))); int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : eval "$as_cachevar=yes" else eval "$as_cachevar=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_cachevar"\" = x"yes"; then : acm_attr_defn="__attribute__((format (__gnu_strftime__,fmt,0)))" else acm_attr_defn="" fi cat >>confdefs.h <<_ACEOF #define BB_STRFTIME_FORMAT( fmt ) $acm_attr_defn _ACEOF CXXFLAGS=$acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS ;; #( *) : acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS=$CXXFLAGS if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" else if test "x$mu_cv_CXX_flag__Werror_attributes" = xno; then : else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=attributes" >&5 $as_echo_n "checking if $CXX supports -Werror=attributes... " >&6; } if ${mu_cv_CXX_flag__Werror_attributes+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Werror_attributes=yes else mu_cv_CXX_flag__Werror_attributes=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Werror_attributes" >&5 $as_echo "$mu_cv_CXX_flag__Werror_attributes" >&6; } if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Werror=attributes" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi fi as_cachevar=`$as_echo "mu_cv_cxx_function_attr_format (__strftime__,fmt,0)" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports function attribute ((format (__strftime__,fmt,0)))" >&5 $as_echo_n "checking if $CXX supports function attribute ((format (__strftime__,fmt,0)))... " >&6; } if eval \${$as_cachevar+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ void p(const char*,...) __attribute__((format (__strftime__, 1, 0))); int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : eval "$as_cachevar=yes" else eval "$as_cachevar=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_cachevar { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_cachevar"\" = x"yes"; then : acm_attr_defn="__attribute__((format (__strftime__,fmt,0)))" else acm_attr_defn="" fi cat >>confdefs.h <<_ACEOF #define BB_STRFTIME_FORMAT( fmt ) $acm_attr_defn _ACEOF CXXFLAGS=$acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS ;; esac acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS=$CXXFLAGS if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" else if test "x$mu_cv_CXX_flag__Werror_attributes" = xno; then : else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=attributes" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=attributes" >&5 $as_echo_n "checking if $CXX supports -Werror=attributes... " >&6; } if ${mu_cv_CXX_flag__Werror_attributes+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Werror_attributes=yes else mu_cv_CXX_flag__Werror_attributes=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Werror_attributes" >&5 $as_echo "$mu_cv_CXX_flag__Werror_attributes" >&6; } if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Werror_attributes" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Werror=attributes" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi fi if test "x$mu_cv_CXX_flag__Werror_missing_declarations" = xyes; then : CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=missing-declarations" else if test "x$mu_cv_CXX_flag__Werror_missing_declarations" = xno; then : else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }$ACM_CXX_WARNINGFAIL" acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS=$CXXFLAGS CXXFLAGS="${CXXFLAGS:+$CXXFLAGS }-Werror=missing-declarations" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports -Werror=missing-declarations" >&5 $as_echo_n "checking if $CXX supports -Werror=missing-declarations... " >&6; } if ${mu_cv_CXX_flag__Werror_missing_declarations+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_CXX_flag__Werror_missing_declarations=yes else mu_cv_CXX_flag__Werror_missing_declarations=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_CXX_flag__Werror_missing_declarations" >&5 $as_echo "$mu_cv_CXX_flag__Werror_missing_declarations" >&6; } if test "x$mu_cv_CXX_flag__Werror_missing_declarations" = xyes; then : else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_OPTION_CXXFLAGS fi if test "x$mu_cv_CXX_flag__Werror_missing_declarations" = xyes; then : CXXFLAGS="${acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS:+$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS }-Werror=missing-declarations" else CXXFLAGS=$acm_save___ACM_ADD_COMPILER_WARNING_CXXFLAGS fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CXX supports statement attribute ((fallthrough))" >&5 $as_echo_n "checking if $CXX supports statement attribute ((fallthrough))... " >&6; } if ${mu_cv_cxx_statement_attr_fallthrough+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
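# The cached checks around this point all follow the same recipe: turn unknown
# attributes into hard errors with -Werror=attributes, try to compile a minimal
# translation unit that uses the attribute being probed, and let the resulting
# mu_cv_* cache variable decide what gets written to confdefs.h.  The snippet
# below is an illustrative, standalone reduction of that recipe, not part of
# the generated script; the compiler default and temporary file names are
# placeholders.
probe_cxx=${CXX:-c++}
cat > conftest-attr.cc <<'EOF'
// Minimal translation unit using the attribute under test.
void p(const char*,...) __attribute__((format (__strftime__, 1, 0)));
int main() { return 0; }
EOF
if $probe_cxx -Werror=attributes -c conftest-attr.cc -o conftest-attr.o 2>/dev/null
then attr_ok=yes
else attr_ok=no
fi
rm -f conftest-attr.cc conftest-attr.o
echo "strftime format attribute supported: $attr_ok"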
*/ int f(int i); int f(int i) { switch(i) { case 1: __attribute__((fallthrough)); case 2: return i; } return 0; } int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : mu_cv_cxx_statement_attr_fallthrough=yes else mu_cv_cxx_statement_attr_fallthrough=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $mu_cv_cxx_statement_attr_fallthrough" >&5 $as_echo "$mu_cv_cxx_statement_attr_fallthrough" >&6; } if test "x$mu_cv_cxx_statement_attr_fallthrough" = xyes; then : acm_attr_defn="__attribute__((fallthrough))" else acm_attr_defn="do {} while(0)" fi cat >>confdefs.h <<_ACEOF #define BB_FALLTHROUGH $acm_attr_defn _ACEOF CXXFLAGS=$acm_save_ACM_CHECK_ATTRIBUTE_CXXFLAGS if test -n "$LIBUSB_DIR"; then : USB_CPPFLAGS="-I$LIBUSB_DIR/include $USB_CPPFLAGS" USB_LDFLAGS="-L$LIBUSB_DIR/lib $USB_LDFLAGS" fi acm_save__0_CPPFLAGS=$CPPFLAGS CPPFLAGS="${CPPFLAGS:+$CPPFLAGS }$USB_CPPFLAGS" for ac_header in libusb-1.0/libusb.h libusb.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_cxx_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF break fi done if test "$ac_cv_header_libusb_1_0_libusb_h" = "yes"; then : libusb_header="<libusb-1.0/libusb.h>" elif test "$ac_cv_header_libusb_h" = "yes"; then : libusb_header="<libusb.h>" fi if test -n "$libusb_header"; then : cat >>confdefs.h <<_ACEOF #define LIBUSB_HEADER $libusb_header _ACEOF fi CPPFLAGS=$acm_save__0_CPPFLAGS acm_save__0_LDFLAGS=$LDFLAGS LDFLAGS="${LDFLAGS:+$LDFLAGS }$USB_LDFLAGS" acm_save__0_LIBS=$LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing libusb_init" >&5 $as_echo_n "checking for library containing libusb_init... " >&6; } if ${ac_cv_search_libusb_init+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply.
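# What follows is the usual Autoconf "search a list of libraries for a symbol"
# dance: link a dummy reference to libusb_init() first with no extra library,
# then with -lusb-1.0, then with -lusb, and keep the first combination that
# links.  The sketch below is an illustrative, self-contained approximation of
# that search (placeholder compiler, no caching and no confdefs.h output), not
# the generated code itself.
cc=${CC:-cc}
cat > conftest-usb.c <<'EOF'
// Dummy prototype: only the link step matters here, exactly as in configure.
char libusb_init ();
int main () { return libusb_init (); }
EOF
usb_libs="not found"
for lib in "" "-lusb-1.0" "-lusb"; do
    if $cc conftest-usb.c $lib -o conftest-usb 2>/dev/null; then
        usb_libs=${lib:-"none required"}
        break
    fi
done
rm -f conftest-usb.c conftest-usb
echo "libusb_init links with: $usb_libs"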
*/ #ifdef __cplusplus extern "C" #endif char libusb_init (); int main () { return libusb_init (); ; return 0; } _ACEOF for ac_lib in '' usb-1.0 usb; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_cxx_try_link "$LINENO"; then : ac_cv_search_libusb_init=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_libusb_init+:} false; then : break fi done if ${ac_cv_search_libusb_init+:} false; then : else ac_cv_search_libusb_init=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_libusb_init" >&5 $as_echo "$ac_cv_search_libusb_init" >&6; } ac_res=$ac_cv_search_libusb_init if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" $as_echo "#define HAVE_LIBUSB 1" >>confdefs.h case $ac_cv_search_libusb_init in #( -l*) : USB_LIBS=${ac_cv_search_libusb_init#-l} ;; #( *) : ;; esac else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: libusb not found" >&5 $as_echo "$as_me: WARNING: libusb not found" >&2;} case $host in *-*-openbsd* ) mu_missing_deps="${mu_missing_deps:+$mu_missing_deps, }libusb1-1.0" ;; *-*-kfreebsd* ) mu_missing_deps="${mu_missing_deps:+$mu_missing_deps, }libusb2-dev" ;; * ) mu_missing_deps="${mu_missing_deps:+$mu_missing_deps, }libusb-1.0-0-dev" ;; esac fi for ac_func in libusb_strerror libusb_get_port_numbers libusb_has_capability do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_cxx_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done LIBS=$acm_save__0_LIBS LDFLAGS=$acm_save__0_LDFLAGS ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$mu_missing_deps"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: " >&5 $as_echo "$as_me: " >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: Some required dependencies were not found." >&5 $as_echo "$as_me: Some required dependencies were not found." >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: Please install: $mu_missing_deps" >&5 $as_echo "$as_me: Please install: $mu_missing_deps" >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: " >&5 $as_echo "$as_me: " >&6;} as_fn_error $? "Cannot continue until this is resolved." "$LINENO" 5 fi if test -z "$libusb_header"; then : as_fn_error $? 
"No libusb header file found" "$LINENO" 5 fi LIBVIRT_SOCKET="$SYSTEM_RUNDIR/libvirt/libvirt-sock" { $as_echo "$as_me:${as_lineno-$LINENO}: Configured bit-babbler $PACKAGE_VERSION" >&5 $as_echo "$as_me: Configured bit-babbler $PACKAGE_VERSION" >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: with udev: $mu_cv_with_udev" >&5 $as_echo "$as_me: with udev: $mu_cv_with_udev" >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: SEEDD_CONTROL_SOCKET: $SEEDD_CONTROL_SOCKET" >&5 $as_echo "$as_me: SEEDD_CONTROL_SOCKET: $SEEDD_CONTROL_SOCKET" >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: LIBVIRT_SOCKET: $LIBVIRT_SOCKET" >&5 $as_echo "$as_me: LIBVIRT_SOCKET: $LIBVIRT_SOCKET" >&6;} if test -n "$THREAD_STACK_SIZE"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: THREAD_STACK_SIZE: $THREAD_STACK_SIZE" >&5 $as_echo "$as_me: THREAD_STACK_SIZE: $THREAD_STACK_SIZE" >&6;} fi case $host in *-*-openbsd* ) { $as_echo "$as_me:${as_lineno-$LINENO}: NOTE: On OpenBSD you will need to build this by using gmake," >&5 $as_echo "$as_me: NOTE: On OpenBSD you will need to build this by using gmake," >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: and you will need to have the bash package installed." >&5 $as_echo "$as_me: and you will need to have the bash package installed." >&6;} ;; *-*-freebsd* ) { $as_echo "$as_me:${as_lineno-$LINENO}: NOTE: On FreeBSD you will need to build this by using gmake," >&5 $as_echo "$as_me: NOTE: On FreeBSD you will need to build this by using gmake," >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: and you will need to have the bash package installed." >&5 $as_echo "$as_me: and you will need to have the bash package installed." >&6;} ;; esac ac_config_files="$ac_config_files munin/bit_babbler" MAKEUP_PLATFORM_HEADER="${makeup_build_platform}_setup.h" MAKEUP_FLAVOUR_HEADER="$makeup_build_platform${makeup_build_flavour}_setup.h" CPPFLAGS="$CPPFLAGS -I\$(top_builddir)/include \$(EXTRACPPFLAGS)" CFLAGS="$CFLAGS \$(EXTRACFLAGS)" CXXFLAGS="$CXXFLAGS \$(EXTRACXXFLAGS)" ac_config_commands="$ac_config_commands Makefile" ac_config_files="$ac_config_files Makefile.acsubst:Makeup/gmake-fragments/makefile.acsubst" ac_config_headers="$ac_config_headers include/private_setup.h:private_setup.h.in" ac_config_commands="$ac_config_commands include/setup.h" ac_config_files="$ac_config_files Makefile.acsubst.bit-babbler:Makeup/config/acsubst.bit-babbler" ac_config_files="$ac_config_files Makefile.acsubst.sysctl:Makeup/ac-fragments/acsubst.sysctl" ac_config_files="$ac_config_files Makefile.acsubst.systemd:Makeup/ac-fragments/acsubst.systemd" ac_config_files="$ac_config_files Makefile.acsubst.udev:Makeup/ac-fragments/acsubst.udev" ac_config_files="$ac_config_files 60-bit-babbler.rules:Makeup/config/acfile.60-bit-babbler.rules" ac_config_files="$ac_config_files bit-babbler-sysctl.conf:Makeup/config/acfile.bit-babbler-sysctl.conf" ac_config_files="$ac_config_files seedd.service:Makeup/config/acfile.seedd.service" ac_config_files="$ac_config_files seedd-wait.service:Makeup/config/acfile.seedd-wait.service" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. 
# # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! -f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. 
debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. 
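# The prologue above spends a lot of effort picking a safe replacement for
# echo, because echo's treatment of -n and of backslash escapes differs from
# shell to shell; printf is the portable answer wherever it exists.  A tiny
# illustrative equivalent of what $as_echo / $as_echo_n end up being on most
# modern systems:
my_echo()   { printf '%s\n' "$*"; }    # behaves like $as_echo
my_echo_n() { printf '%s'   "$*"; }    # behaves like $as_echo_n
my_echo  '-n is printed literally rather than eaten as an option'
my_echo_n 'no trailing newline here'; my_echo ''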
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. 
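# as_fn_append and as_fn_arith above are small capability shims: when the
# running shell understands += and $(( )) they use those, otherwise they fall
# back to eval and expr.  A simplified, standalone sketch of the same idea
# (illustrative only, with a shorter name than the real helper):
if (eval "x=1; x+=2; test x\$x = x12") 2>/dev/null; then
    append() { eval "$1+=\$2"; }          # fast path: shell supports +=
else
    append() { eval "$1=\$$1\$2"; }       # portable fallback
fi
list=
append list " one"
append list " two"
echo "list:$list"                         # prints "list: one two"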
ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. ## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by bit-babbler $as_me 0.9, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" config_commands="$ac_config_commands" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... 
-h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Configuration commands: $config_commands Report bugs to ." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ bit-babbler config.status 0.9 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' MKDIR_P='$MKDIR_P' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error $? "ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. 
## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # # INIT-COMMANDS # # Capture the value of obsolete ALL_LINGUAS because we need it to compute # POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES, CATALOGS. OBSOLETE_ALL_LINGUAS="$ALL_LINGUAS" # Capture the value of LINGUAS because we need it to compute CATALOGS. LINGUAS="${LINGUAS-%UNSET%}" makeup_version="0.38" package_name="bit-babbler" package_version="0.9" __package_config_dir="" __package_config_public="setup.h" if test ! -e $srcdir/private_setup.h.in; then touch $srcdir/private_setup.h.in; fi; acm_public_macros="$acm_public_macros" $acm_public_macros_def acm_public_strings="$acm_public_strings" $acm_public_strings_def LN_S="$LN_S" config_platform="$MAKEUP_PLATFORM_HEADER" config_flavour="$MAKEUP_FLAVOUR_HEADER" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "po-directories") CONFIG_COMMANDS="$CONFIG_COMMANDS po-directories" ;; "munin/bit_babbler") CONFIG_FILES="$CONFIG_FILES munin/bit_babbler" ;; "Makefile") CONFIG_COMMANDS="$CONFIG_COMMANDS Makefile" ;; "Makefile.acsubst") CONFIG_FILES="$CONFIG_FILES Makefile.acsubst:Makeup/gmake-fragments/makefile.acsubst" ;; "include/private_setup.h") CONFIG_HEADERS="$CONFIG_HEADERS include/private_setup.h:private_setup.h.in" ;; "include/setup.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/setup.h" ;; "Makefile.acsubst.bit-babbler") CONFIG_FILES="$CONFIG_FILES Makefile.acsubst.bit-babbler:Makeup/config/acsubst.bit-babbler" ;; "Makefile.acsubst.sysctl") CONFIG_FILES="$CONFIG_FILES Makefile.acsubst.sysctl:Makeup/ac-fragments/acsubst.sysctl" ;; "Makefile.acsubst.systemd") CONFIG_FILES="$CONFIG_FILES Makefile.acsubst.systemd:Makeup/ac-fragments/acsubst.systemd" ;; "Makefile.acsubst.udev") CONFIG_FILES="$CONFIG_FILES Makefile.acsubst.udev:Makeup/ac-fragments/acsubst.udev" ;; "60-bit-babbler.rules") CONFIG_FILES="$CONFIG_FILES 60-bit-babbler.rules:Makeup/config/acfile.60-bit-babbler.rules" ;; "bit-babbler-sysctl.conf") CONFIG_FILES="$CONFIG_FILES bit-babbler-sysctl.conf:Makeup/config/acfile.bit-babbler-sysctl.conf" ;; "seedd.service") CONFIG_FILES="$CONFIG_FILES seedd.service:Makeup/config/acfile.seedd.service" ;; "seedd-wait.service") CONFIG_FILES="$CONFIG_FILES seedd-wait.service:Makeup/config/acfile.seedd-wait.service" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! 
-d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). 
if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_tt=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_tt"; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). 
The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac ac_MKDIR_P=$MKDIR_P case $MKDIR_P in [\\/$]* | ?:[\\/]* ) ;; */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. 
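# The awk program assembled earlier ("subs.awk") is what replaces @variable@
# tokens in each template with the values configure worked out, when the
# sed | awk pipeline below runs.  A rough stand-in for that substitution using
# plain sed (illustrative only; the real awk handles quoting, very long values
# and embedded newlines, and demo.in is a made-up file name):
prefix=/usr/local
PACKAGE_VERSION=0.9
printf '%s\n' 'prefix = @prefix@' 'version = @PACKAGE_VERSION@' > demo.in
sed -e "s|@prefix@|$prefix|g" -e "s|@PACKAGE_VERSION@|$PACKAGE_VERSION|g" demo.in
rm -f demo.in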
ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t s&@MKDIR_P@&$ac_MKDIR_P&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? "could not create -" "$LINENO" 5 fi ;; :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 $as_echo "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "po-directories":C) for ac_file in $CONFIG_FILES; do # Support "outfile[:infile[:infile...]]" case "$ac_file" in *:*) ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;; esac # PO directories have a Makefile.in generated from Makefile.in.in. 
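# The "po-directories" command handled here is the standard gettext hook: it
# rebuilds each po/Makefile and POTFILES list, and narrows the installed
# message catalogs to the languages named in the LINGUAS environment variable
# when that is set, as the selection code below shows.  Illustrative usage
# only; the language codes are placeholders and the package may ship no
# catalogs at all:
LINGUAS="de fr" ./configure         # restrict the catalogs considered for install
./config.status po-directories      # later, regenerate po/Makefile and POTFILES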
case "$ac_file" in */Makefile.in) # Adjust a relative srcdir. ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` ac_dir_suffix=/`echo "$ac_dir"|sed 's%^\./%%'` ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` # In autoconf-2.13 it is called $ac_given_srcdir. # In autoconf-2.50 it is called $srcdir. test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" case "$ac_given_srcdir" in .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; /*) top_srcdir="$ac_given_srcdir" ;; *) top_srcdir="$ac_dots$ac_given_srcdir" ;; esac # Treat a directory as a PO directory if and only if it has a # POTFILES.in file. This allows packages to have multiple PO # directories under different names or in different locations. if test -f "$ac_given_srcdir/$ac_dir/POTFILES.in"; then rm -f "$ac_dir/POTFILES" test -n "$as_me" && echo "$as_me: creating $ac_dir/POTFILES" || echo "creating $ac_dir/POTFILES" gt_tab=`printf '\t'` cat "$ac_given_srcdir/$ac_dir/POTFILES.in" | sed -e "/^#/d" -e "/^[ ${gt_tab}]*\$/d" -e "s,.*, $top_srcdir/& \\\\," | sed -e "\$s/\(.*\) \\\\/\1/" > "$ac_dir/POTFILES" POMAKEFILEDEPS="POTFILES.in" # ALL_LINGUAS, POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES depend # on $ac_dir but don't depend on user-specified configuration # parameters. if test -f "$ac_given_srcdir/$ac_dir/LINGUAS"; then # The LINGUAS file contains the set of available languages. if test -n "$OBSOLETE_ALL_LINGUAS"; then test -n "$as_me" && echo "$as_me: setting ALL_LINGUAS in configure.in is obsolete" || echo "setting ALL_LINGUAS in configure.in is obsolete" fi ALL_LINGUAS=`sed -e "/^#/d" -e "s/#.*//" "$ac_given_srcdir/$ac_dir/LINGUAS"` POMAKEFILEDEPS="$POMAKEFILEDEPS LINGUAS" else # The set of available languages was given in configure.in. ALL_LINGUAS=$OBSOLETE_ALL_LINGUAS fi # Compute POFILES # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).po) # Compute UPDATEPOFILES # as $(foreach lang, $(ALL_LINGUAS), $(lang).po-update) # Compute DUMMYPOFILES # as $(foreach lang, $(ALL_LINGUAS), $(lang).nop) # Compute GMOFILES # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).gmo) case "$ac_given_srcdir" in .) srcdirpre= ;; *) srcdirpre='$(srcdir)/' ;; esac POFILES= UPDATEPOFILES= DUMMYPOFILES= GMOFILES= for lang in $ALL_LINGUAS; do POFILES="$POFILES $srcdirpre$lang.po" UPDATEPOFILES="$UPDATEPOFILES $lang.po-update" DUMMYPOFILES="$DUMMYPOFILES $lang.nop" GMOFILES="$GMOFILES $srcdirpre$lang.gmo" done # CATALOGS depends on both $ac_dir and the user's LINGUAS # environment variable. INST_LINGUAS= if test -n "$ALL_LINGUAS"; then for presentlang in $ALL_LINGUAS; do useit=no if test "%UNSET%" != "$LINGUAS"; then desiredlanguages="$LINGUAS" else desiredlanguages="$ALL_LINGUAS" fi for desiredlang in $desiredlanguages; do # Use the presentlang catalog if desiredlang is # a. equal to presentlang, or # b. a variant of presentlang (because in this case, # presentlang can be used as a fallback for messages # which are not translated in the desiredlang catalog). 
case "$desiredlang" in "$presentlang"*) useit=yes;; esac done if test $useit = yes; then INST_LINGUAS="$INST_LINGUAS $presentlang" fi done fi CATALOGS= if test -n "$INST_LINGUAS"; then for lang in $INST_LINGUAS; do CATALOGS="$CATALOGS $lang.gmo" done fi test -n "$as_me" && echo "$as_me: creating $ac_dir/Makefile" || echo "creating $ac_dir/Makefile" sed -e "/^POTFILES =/r $ac_dir/POTFILES" -e "/^# Makevars/r $ac_given_srcdir/$ac_dir/Makevars" -e "s|@POFILES@|$POFILES|g" -e "s|@UPDATEPOFILES@|$UPDATEPOFILES|g" -e "s|@DUMMYPOFILES@|$DUMMYPOFILES|g" -e "s|@GMOFILES@|$GMOFILES|g" -e "s|@CATALOGS@|$CATALOGS|g" -e "s|@POMAKEFILEDEPS@|$POMAKEFILEDEPS|g" "$ac_dir/Makefile.in" > "$ac_dir/Makefile" for f in "$ac_given_srcdir/$ac_dir"/Rules-*; do if test -f "$f"; then case "$f" in *.orig | *.bak | *~) ;; *) cat "$f" >> "$ac_dir/Makefile" ;; esac fi done fi ;; esac done ;; "munin/bit_babbler":F) chmod +x munin/bit_babbler ;; "Makefile":C) cat > Makefile < # # This file is distributed under the terms of the GNU GPL version 2. # # As a special exception to the GPL, it may be distributed without # modification as a part of a program using a makeup generated build # system, under the same distribution terms as the program itself. include Makefile.acsubst include \$(MAKEUP_TOP_CONFIG) ifneq (\$(strip \$(MAKEUP_VERBOSE)),) include \$(MAKEUP_GMAKE_DIR)/makefile.makeup else -include \$(MAKEUP_GMAKE_DIR)/makefile.makeup endif EOF ;; "include/setup.h":C) _SUBDIR="$(dirname setup.h)/" if test "$_SUBDIR" = "./"; then _SUBDIR= fi _TEMPFILE="include/.tempfile" _GUARD="_MAKEFILE_PLATFORM_$(echo $package_name | tr "a-z .-" "A-Z___")_CONF_H" cat > $_TEMPFILE < * * This file is distributed under the terms of the GNU GPL version 2. * * As a special exception to the GPL, it may be distributed without * modification as a part of a program using a makeup generated build * system, under the same distribution terms as the program itself. */ #ifndef ${_GUARD} #define ${_GUARD} // Guard for POSIX dependent code #if defined(__unix__) || defined(__unix) || (defined(__APPLE__) && defined(__MACH__)) #if (EM_PLATFORM_POSIX != 1) #define EM_PLATFORM_POSIX 1 #endif // Guard for Linux kernel dependent code #if defined(__linux__) #if linux == 1 #define SAVE_linux #undef linux #elif defined(linux) #warning Macro 'linux' is defined to a value other than 1 #endif #if (EM_PLATFORM_LINUX != 1) #define EM_PLATFORM_LINUX 1 #endif #define EM_PLATFORM__ linux #else // Guard for BSD dependent code #include #if defined(BSD) || defined(__FreeBSD_kernel__) #if (EM_PLATFORM_BSD != 1) #define EM_PLATFORM_BSD 1 #endif #define EM_PLATFORM__ bsd #endif // Guard for MacOSX dependent code #if defined(__APPLE__) && defined(__MACH__) && (EM_PLATFORM_MAC != 1) #define EM_PLATFORM_MAC 1 #endif #endif #endif // Guard for Windows dependent code #if defined(_WIN32) #if (EM_PLATFORM_MSW != 1) #define EM_PLATFORM_MSW 1 #endif #define EM_PLATFORM__ msw #endif #ifndef EM_PLATFORM__ #error Platform unrecognised. #endif // Feature override macro. // // You may define the value of this macro to specify a configuration // other than the system default. 'd' will attempt to use a debug // build, 'r' a release build. Other flavour options may be defined // by individual packages in their own configuration. 
#ifndef EM_CONFIG_FLAVOUR #define EM_CONFIG_FLAVOUR #endif #define EM_CAT(a,b) EM_CAT_(a,b) #define EM_CAT_(a,b) a ## b #define EM_CONFIG_HEADER <${__package_config_dir}EM_CAT(EM_PLATFORM__,EM_CONFIG_FLAVOUR)_${__package_config_public}> #include EM_CONFIG_HEADER #ifndef _${_GUARD} #error Config header cannot be located #endif #undef EM_CAT #undef EM_CAT_ #undef EM_PLATFORM__ #undef EM_CONFIG_HEADER #ifdef SAVE_linux #define linux 1 #undef SAVE_linux #endif // Compiler version tests. // // This macro will return false if the version of gcc in use // is earlier than the specified major, minor limit, or if gcc // is not being used. Otherwise it will evaluate to be true. // This will also be true for the clang compiler, for whatever // GCC version it is pretending to be compatible with. #if defined(__GNUC__) && defined(__GNUC_MINOR__) #define EM_COMPILER_GCC( major, minor ) ( ( __GNUC__ > (major) ) \\ || ( __GNUC__ == (major) && __GNUC_MINOR__ >= (minor) ) ) #else #define EM_COMPILER_GCC( major, minor ) 0 #endif // As above, except for the clang compiler instead. #if defined(__clang_major__) && defined(__clang_minor__) #define EM_COMPILER_CLANG( major, minor ) ( ( __clang_major__ > (major) ) \\ || ( __clang_major__ == (major) && __clang_minor__ >= (minor) ) ) #else #define EM_COMPILER_CLANG( major, minor ) 0 #endif #endif // ${_GUARD} EOF if diff --brief include/setup.h $_TEMPFILE > /dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: setup.h is unchanged" >&5 $as_echo "$as_me: setup.h is unchanged" >&6;} rm $_TEMPFILE else mv $_TEMPFILE include/setup.h fi echo "/* Makeup generated $_SUBDIR$config_flavour */" > $_TEMPFILE echo >> $_TEMPFILE echo "#ifndef _${_GUARD}" >> $_TEMPFILE echo "#define _${_GUARD}" >> $_TEMPFILE echo >> $_TEMPFILE for m in $acm_public_macros; do eval echo "/\* \$acm_public_macro_desc_$m \*/" >> $_TEMPFILE echo "#ifndef $m" >> $_TEMPFILE eval echo "\#define $m \$acm_public_macro_$m" >> $_TEMPFILE echo "#endif" >> $_TEMPFILE echo >> $_TEMPFILE done for s in $acm_public_strings; do eval echo "/\* \$acm_public_string_desc_$s \*/" >> $_TEMPFILE echo "#ifndef $s" >> $_TEMPFILE eval echo "\#define $s \\\"\$acm_public_string_$s\\\"" >> $_TEMPFILE echo "#endif" >> $_TEMPFILE echo >> $_TEMPFILE done echo "#endif // _${_GUARD}" >> $_TEMPFILE if diff --brief include/$_SUBDIR$config_flavour $_TEMPFILE > /dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $_SUBDIR$config_flavour is unchanged" >&5 $as_echo "$as_me: $_SUBDIR$config_flavour is unchanged" >&6;} rm $_TEMPFILE else mv $_TEMPFILE include/$_SUBDIR$config_flavour fi ( cd include/$_SUBDIR if test ! -e $config_platform; then $LN_S $config_flavour $config_platform fi ) ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. 
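# The include/setup.h written by the command above gives the sources stable
# platform guards (EM_PLATFORM_POSIX, EM_PLATFORM_LINUX, ...), compiler version
# tests (EM_COMPILER_GCC, EM_COMPILER_CLANG) and the EM_CONFIG_FLAVOUR hook.
# A hedged sketch of how a translation unit might consume it from the top of a
# configured build tree; the file name is made up and the -Iinclude path simply
# mirrors the -I$(top_builddir)/include added to CPPFLAGS earlier:
cat > demo-setup.cc <<'EOF'
#include <setup.h>
#if EM_PLATFORM_POSIX
// POSIX-only code would go here.
#endif
#if EM_COMPILER_GCC(7,1)
// Only seen by gcc >= 7.1, or by clang claiming that level of compatibility.
#endif
int main() { return 0; }
EOF
${CXX:-c++} -Iinclude -c demo-setup.cc -o demo-setup.o
rm -f demo-setup.cc demo-setup.o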
if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi bit-babbler-0.9/configure.ac0000644000000000000000000020262014136173163012712 0ustar # Makeup 0.38 generated configure.ac. # Do not edit this file directly, your changes will be lost. # Copyright (C) 2003 - 2021, Ron # # This file is distributed under the terms of the GNU GPL version 2. # # As a special exception to the GPL, it may be distributed without # modification as a part of a program using a makeup generated build # system, under the same distribution terms as the program itself. AC_INIT([bit-babbler], [0.9], [ron@debian.org]) AC_COPYRIGHT([Copyright (C) 2003 - 2021, Ron ]) AC_PREREQ([2.61]) AC_REVISION([generated by Makeup 0.38]) AC_CONFIG_SRCDIR([Makeup/Makeup.conf]) # We need these available before macros that follow. FIND_AND_LINK_IF_LOCAL([config.guess],[Makeup/ac-aux]) FIND_AND_LINK_IF_LOCAL([config.sub],[Makeup/ac-aux]) # This used to be ..._COPY_UNLESS_LOCAL, but using the # ..._CONFIG_AUX_DIR macro below has changed what we need. FIND_AND_LINK_IF_LOCAL([install-sh],[Makeup/ac-aux]) # We need the files above in place, else this will fail. AC_CONFIG_AUX_DIR([Makeup/ac-aux]) # Find out where we're building and who we're building for. AC_CANONICAL_BUILD AC_CANONICAL_HOST # Select the default language standard to use. CXX_STANDARD="-std=gnu++98" dnl ----- Begin: configure.stdtools ----- dnl Makeup configure boilerplate. dnl dnl Copyright 2003 - 2021, Ron dnl dnl This file is distributed under the terms of the GNU GPL version 2. dnl dnl As a special exception to the GPL, it may be distributed without dnl modification as a part of a program using a makeup generated build dnl system, under the same distribution terms as the program itself. # Check standard args. 
AC_ARG_ENABLE([pipe], [AS_HELP_STRING([--enable-pipe], [use pipes instead of temporary files for]dnl [ faster compilation (default yes)])], [mu_cv_enable_pipe=$enableval], [mu_cv_enable_pipe=yes]) AC_ARG_ENABLE([optimisation], [AS_HELP_STRING([--enable-optimisation], [use compiler optimisation flags (default yes)])], [mu_cv_enable_optimisation=$enableval], [mu_cv_enable_optimisation=yes]) AC_ARG_ENABLE([debug], [AS_HELP_STRING([--enable-debug], [enable extra debug code (default yes)])], [mu_cv_enable_debug=$enableval], [mu_cv_enable_debug=yes]) AC_ARG_ENABLE([profile], [AS_HELP_STRING([--enable-profile], [use profiling flags (default no)])], [mu_cv_enable_profiling=$enableval], [mu_cv_enable_profiling=no]) AC_ARG_ENABLE([extra_warnings], [AS_HELP_STRING([--enable-extra_warnings], [use extra compiler warnings (default yes)])], [mu_cv_enable_extra_warnings=$enableval], [mu_cv_enable_extra_warnings=yes]) AC_ARG_ENABLE([werror], [AS_HELP_STRING([--enable-werror], [fail on compile warnings, (default yes for]dnl [ release builds, no for debug builds)])], [ mu_cv_enable_fail_on_warning=$enableval], [ AS_IF([test "$mu_cv_enable_debug" = yes],[ mu_cv_enable_fail_on_warning=no ],[ dnl mu_cv_enable_fail_on_warning=yes dnl Basic tests like for iconv crap out right now dnl with warnings as errors. Disable temporarily. mu_cv_enable_fail_on_warning=no ]) ]) AC_ARG_ENABLE([valgrind_friendly], [AS_HELP_STRING([--enable-valgrind_friendly], [do extra cleanup to be valgrind clean (default no)])], [mu_cv_enable_valgrind_friendly=$enableval], [mu_cv_enable_valgrind_friendly=no]) AC_ARG_ENABLE([bison_deprecated_warnings], [AS_HELP_STRING([--enable-bison_deprecated_warnings], [let bison3 bark about deprecated bison2 constructs]dnl [ (default no)])], [mu_cv_enable_bison_deprecated_warnings=$enableval], [mu_cv_enable_bison_deprecated_warnings=no]) AC_ARG_ENABLE([code_suggestions], [AS_HELP_STRING([--enable-code_suggestions], [let the compiler suggest optimisation and safety]dnl [ changes (default yes)])], [mu_cv_enable_code_suggestions=$enableval], [mu_cv_enable_code_suggestions=yes]) AC_ARG_ENABLE([clang_almost_everything], [AS_HELP_STRING([--enable-clang_almost_everything@<:@=version@:>@], [build with most of clang's -Weverything warnings,]dnl [ optionally specifying the clang version to use]dnl [ (default no)])], [mu_cv_enable_clang_almost_everything=$enableval], [mu_cv_enable_clang_almost_everything=no]) AS_IF([test "$mu_cv_enable_clang_almost_everything" = yes], [mu_cv_prog_cc=${mu_cv_prog_cc:-clang} mu_cv_prog_cxx=${mu_cv_prog_cxx:-clang++}], [test "$mu_cv_enable_clang_almost_everything" != no], [mu_cv_prog_cc=${mu_cv_prog_cc:-clang-$mu_cv_enable_clang_almost_everything} mu_cv_prog_cxx=${mu_cv_prog_cxx:-clang++-$mu_cv_enable_clang_almost_everything}] ) dnl With _FORTIFY_SOURCE=2, it is possible that some conforming programs may fail, dnl but we'll default to the strongest tests and let the individual projects back dnl off from that if needed. It requires optimisation to be enabled, and glibc dnl since version 2.16 will warn if it is used with -O0, so we don't enable it by dnl default if --disable-optimisation was used. The Debian glibc patches out that dnl warning and just silently disables it if not building with optimisation, but dnl we still don't want to have other distro users nagged by that. 
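dnl To sketch roughly how the option below behaves (illustrative only):
dnl
dnl   --enable-fortify_source     ->  _FORTIFY_SOURCE defined to 2
dnl   --enable-fortify_source=3   ->  _FORTIFY_SOURCE defined to 3
dnl   --disable-optimisation with no explicit choice, or a sanitiser build
dnl                               ->  _FORTIFY_SOURCE not defined at all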
AC_ARG_ENABLE([fortify_source], [AS_HELP_STRING([--enable-fortify_source@<:@=N@:>@], [compile with -D_FORTIFY_SOURCE=N (default 2]dnl [ if optimisation is enabled)])], [AS_IF([test "$enableval" = yes], [mu_cv_enable_fortify_source=2], [mu_cv_enable_fortify_source=$enableval]) ], [AS_IF([test "$mu_cv_enable_optimisation" = no], [mu_cv_enable_fortify_source=no], [mu_cv_enable_fortify_source=2]) ]) AC_ARG_ENABLE([stack_protector], [AS_HELP_STRING([--enable-stack_protector@<:@=option@:>@], [build with stack protection guards (default strong),]dnl [ may be set to 'strong', 'all', 'explicit', or a]dnl [ numeric value for the ssp-buffer-size parameter])], [AS_IF([test "$enableval" = yes], [mu_cv_enable_stack_protector=strong], [mu_cv_enable_stack_protector=$enableval]) ], [mu_cv_enable_stack_protector=strong]) AC_ARG_ENABLE([relro], [AS_HELP_STRING([--enable-relro], [make process memory read-only after relocation]dnl [ where possible (default yes)])], [mu_cv_enable_relro=$enableval], [mu_cv_enable_relro=yes]) dnl This is usually only a benefit when combined with relro, so link their defaults. AC_ARG_ENABLE([bind_now], [AS_HELP_STRING([--enable-bind_now], [resolve all symbols at process startup so that]dnl [ they can be included in relro (default yes if]dnl [ relro is enabled)])], [mu_cv_enable_bind_now=$enableval], [mu_cv_enable_bind_now=$mu_cv_enable_relro]) dnl Default -fsanitize= options to use with --enable-san. dnl The float-divide-by-zero and float-cast-overflow options are enabled by dnl -fsanitize=undefined with Clang, but GCC does not enable those by default. dnl It should be harmless to enable them explicitly on Clang though. m4_pushdef([mu_default_san_options], [[address,undefined,float-divide-by-zero,float-cast-overflow,integer,nullability]])dnl dnl m4_pushdef([mu_default_tsan_options],[[thread,undefined,integer,nullability]])dnl dnl AC_ARG_ENABLE([san], [AS_HELP_STRING([--enable-san@<:@=sanitizer,...@:>@], [build with runtime sanitiser support (default no), ]dnl [pass a comma-separated list of sanitizers, else ]dnl ["]mu_default_san_options[" will be used])], [AS_IF([test "$enableval" = yes], [mu_cv_enable_san="mu_default_san_options"], [mu_cv_enable_san=$enableval]) ], [mu_cv_enable_san=no]) dnl TSan can't be enabled together with ASan, so give it its own shortcut option. AC_ARG_ENABLE([tsan], [AS_HELP_STRING([--enable-tsan], [shortcut option for ]dnl [--enable-san=]mu_default_tsan_options)], [AS_IF([test "$enableval" = yes], [mu_cv_enable_san="mu_default_tsan_options"], [mu_cv_enable_san=$enableval]) ]) dnl dnl We don't need these anymore, so don't pollute the global namespace with them. m4_popdef([mu_default_tsan_options],[mu_default_san_options])dnl dnl ASan at least is not currently compatible with the _FORTIFY_SOURCE checks, dnl so disable that when the sanitisers are going to be used. AS_IF([test "$mu_cv_enable_san" != no],[mu_cv_enable_fortify_source=no]) AC_ARG_ENABLE([shared], [AS_HELP_STRING([--enable-shared], [use dynamic linking (default yes)])], [mu_cv_enable_shared=$enableval], [mu_cv_enable_shared=yes]) AC_ARG_ENABLE([static], [AS_HELP_STRING([--enable-static], [use static linking (default no)])], [ AS_IF([test "$enableval" = yes],[mu_cv_enable_shared=no]) ]) dnl These are separately precious because overriding {C,CXX}FLAGS should not dnl normally mask the C/C++ standard that a project is built with, and that dnl might not be an immediately obvious consequence of setting them explicitly.
dnl If you really want to override that, do it with these (or by changing the dnl PACKAGE_{C,XX}STD set for the project), which likewise will also preserve dnl whatever other compiler flags would normally be used. AC_ARG_VAR([C_STANDARD], [flags to set the compiler C standard to use]) AC_ARG_VAR([CXX_STANDARD], [flags to set the compiler C++ standard to use]) dnl Not all platforms have GCC as their default compiler anymore, even if it is dnl still available by default. Autoconf still prefers to use GCC by default dnl in the AC_PROG_{CC,CXX} tests though. These variables let the search order dnl be explicitly specified by the user, and let us automatically tweak it for dnl different platforms. AC_ARG_VAR([CC_SEARCH], [space-separated list of which C compiler to prefer]) AC_ARG_VAR([CXX_SEARCH], [space-separated list of which C++ compiler to prefer]) AC_ARG_VAR([RC_SEP], [a hack for excluding windows resource files]) AC_ARG_VAR([ARFLAGS], [options passed to ar]) AC_ARG_VAR([YACCFLAGS], [options passed to bison/yacc]) AC_ARG_VAR([LEXFLAGS], [options passed to flex/lex]) AC_ARG_VAR([PICFLAGS], [extra flags for building dynamically linked object files]) AC_ARG_VAR([HOST_PICFLAGS], [the PICFLAGS needed for the intended host system]) AC_ARG_VAR([PTHREAD_CPPFLAGS], [C/C++ preprocessor flags for thread-safe builds]) AC_ARG_VAR([PTHREAD_LDFLAGS], [C/C++ linker flags for thread-safe builds]) dnl the EXTRAFOO variables allow appending additional flags without completely dnl overriding the normal default set (and/or having to specify them manually dnl just to add some additional option. AC_ARG_VAR([EXTRACPPFLAGS], [extra C preprocessor flags]) AC_ARG_VAR([EXTRACFLAGS], [extra C compiler flags]) AC_ARG_VAR([EXTRACXXFLAGS], [extra C++ compiler flags]) AC_ARG_VAR([EXTRALDFLAGS], [extra linker flags]) AC_ARG_VAR([EXTRAYACCFLAGS], [extra options passed to bison/yacc]) AC_ARG_VAR([EXTRALEXFLAGS], [extra options passed to flex/lex]) AC_ARG_VAR([EXTRALIBS], [extra libraries (to link before LIBS)]) AC_ARG_VAR([MAKEUP_HOST_ARCH], [architecture that targets should be built for]) AC_ARG_VAR([MAKEUP_DEFAULT_LINKAGE], [default linkage for binary targets]) AC_ARG_VAR([DSOEXT], [filename extension for dynamic libraries]) dnl On Linux, FHS 3.0 introduced /run to replace the /var/run directory, dnl FreeBSD and OpenBSD use a similar spec that is documented in hier(7), dnl but they still use /var/run at present. AC_ARG_VAR([SYSTEM_RUNDIR], [System directory for run-time variable data]) dnl Fully expanded paths for the standard installation directories. dnl Normally you should try to avoid using these, and instead use the standard dnl variables for them directly - but there are a few cases, such as paths in dnl configuration files, or in compiled or generated source files, where the dnl normal secondary expansions that some of these contain will not, or cannot dnl occur, and so the fully expanded path, using the value of $exec_prefix and dnl $prefix at configure time must be used instead. ACM_EXPAND_DIR([EXP_PREFIX],[$prefix]) ACM_EXPAND_DIR([EXP_EXEC_PREFIX],[$exec_prefix]) ACM_EXPAND_DIR([EXP_BINDIR],[$bindir]) ACM_EXPAND_DIR([EXP_SBINDIR],[$sbindir]) ACM_EXPAND_DIR([EXP_INCLUDEDIR],[$includedir]) ACM_EXPAND_DIR([EXP_LIBDIR],[$libdir]) ACM_EXPAND_DIR([EXP_DATADIR],[$datadir]) ACM_EXPAND_DIR([EXP_DOCDIR],[$docdir]) ACM_EXPAND_DIR([EXP_MANDIR],[$mandir]) ACM_EXPAND_DIR([EXP_LOCALEDIR],[$localedir]) # Oddly enough, the most preferred compiler is a platform specific thing, not a # universal truth. Who could have guessed ... 
dnl Keeping this list current with the changing Winds of Whim could become a dnl rather tedious and fragile thing, so it's tempting to default to checking dnl for cc and c++ first everywhere, on the assumption that all modern systems dnl now have that as an alias to their actually preferred toolchains, but that dnl has the downside of making it less obvious exactly which compiler is being dnl used, and making it even more fragile if some user has changed it from what dnl the normal platform default would otherwise be ... So let's see how this dnl goes for a while. At present the platform needing this most is OpenBSD, dnl since it still ships an ancient "last of the GPLv2" gcc in its base set, dnl but actually has clang as its default and preferred compiler. case $host in *-*-openbsd* | *-*-freebsd* | *-*-darwin* ) dnl OpenBSD (as of 6.2) still has GCC 4.2.1 installed in its base set, dnl but "defaults" to clang (which is what /usr/bin/cc points to), so dnl test for a working clang before gcc there. dnl dnl FreeBSD 11 considers clang to be its default compiler, and though dnl it ships with gcc 5-7, there seem to be an ever increasing number dnl of ways in which the GCC toolchain there is broken. We already dnl had workarounds for broken optimisation there (see the platform dnl specific toolchain tests below), and now we find that the version dnl of binutils it is using is known to be broken with --enable-relro dnl too (https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=219035#c4) dnl so stop swimming against the tide there and default it to clang. dnl dnl Likewise for MacOS, it being both a fork of FreeBSD and having a dnl near existential dread of the letters GPL. AS_IF([test -z "$CC_SEARCH"],[CC_SEARCH="clang gcc cc"]) AS_IF([test -z "$CXX_SEARCH"],[CXX_SEARCH="clang++ g++ c++"]) ;; * ) dnl By default, do what autoconf would otherwise do and prefer GCC, dnl except our second choice is clang (which it entirely ignores), dnl and we don't bother looking for the obscure C++ compilers which dnl it would check for if it doesn't find g++ or c++. When someone dnl proves they want them, and that they can compile our code, then dnl we can revise this list to add them. dnl dnl Ideally, we'd have defaulted to calling AC_PROG_{CC,CXX} with an dnl empty argument, and just let it do its own default thing, but that dnl macro is too broken to enable that, it checks if the argument is dnl empty during the m4 pass, so it considers an empty variable to be dnl an explicit list (and then fails at runtime with no compilers to dnl check) - and we can't AS_IF it and call it either with or without dnl arguments at runtime, because there are tests in there which will dnl only expand once, and so everything falls apart when they are only dnl expanded in the dead branch ... The assumption that it will only dnl ever appear once in one code path goes deep there. AS_IF([test -z "$CC_SEARCH"],[CC_SEARCH="gcc clang cc"]) AS_IF([test -z "$CXX_SEARCH"],[CXX_SEARCH="g++ clang++ c++"]) ;; esac SYSTEM_RUNDIR="/var/run" RC_SEP="#" case $host in *-*-linux* ) dnl Used in the make subsystem for rule selection. MAKEUP_HOST_ARCH="ELF" dnl Used to define the link names to the config header. makeup_build_platform="linux" DSOEXT=".so" dnl Don't do this by default anymore. It went out of vogue with gcc3. 
dnl CPPFLAGS="$CPPFLAGS -DUSE_GCC_PRAGMA" HOST_PICFLAGS="-fPIC" SYSTEM_RUNDIR="/run" ;; *-*-*bsd* | *-*-darwin* ) MAKEUP_HOST_ARCH="ELF" makeup_build_platform="bsd" DSOEXT=".so" HOST_PICFLAGS="-fPIC" ;; *-*-cygwin* | *-*-mingw32* ) MAKEUP_HOST_ARCH="PE" makeup_build_platform="msw" DSOEXT=".dll" HOST_PICFLAGS="-D_DLL=1 -D_WINDLL=1" AC_ARG_VAR([WINRCFLAGS], [options passed to windres]) WINRCFLAGS="--include-dir /usr/$host_alias/include" AS_IF([test -n "$mu_cv_with_wx_build_dir"],[ WINRCFLAGS="$WINRCFLAGS --include-dir $mu_cv_with_wx_build_dir/../include" ]) WINRCFLAGS="$WINRCFLAGS --define __WIN32__ --define __WIN95__ --define __GNUWIN32__" RC_SEP= ;; * ) AC_MSG_ERROR([Unknown host type. Stopping.]) ;; esac dnl This one may be added to later by user config that adds flavours. dnl At this level there are only two flavours, 'd' - debug, and 'r' - release. AS_IF([test "$mu_cv_enable_debug" = yes],[makeup_build_flavour=d],[makeup_build_flavour=r]) AS_IF([test "$mu_cv_enable_shared" = yes],[ MAKEUP_DEFAULT_LINKAGE="shared" PICFLAGS="\$(HOST_PICFLAGS)" ],[ MAKEUP_DEFAULT_LINKAGE="static" ]) # Check standard tools. dnl We might be overriding these based on an option request or test. dnl But we'll still want to check that they actually work. CC=${CC:-$mu_cv_prog_cc} CXX=${CXX:-$mu_cv_prog_cxx} dnl If the user explicitly set C/CXXFLAGS, and we have an explicitly specified dnl language standard to use, then prepend that to the *FLAGS but otherwise we dnl respect the user's selection of FLAGS and don't modify them here. If they dnl are not set yet, then we set them here to stop AC_PROG_* from adding its dnl own default options to them and flag that we'll be adding our own lot once dnl we've done the basic checks for a working toolchain. AS_IF([test "${CFLAGS+set}" = set],[ AS_IF([test -n "$C_STANDARD"],[CFLAGS="$C_STANDARD${CFLAGS:+ $CFLAGS}"]) ],[ CFLAGS=$C_STANDARD mu_use_our_cflags=yes ]) AS_IF([test "${CXXFLAGS+set}" = set],[ AS_IF([test -n "$CXX_STANDARD"],[CXXFLAGS="$CXX_STANDARD${CXXFLAGS:+ $CXXFLAGS}"]) ],[ CXXFLAGS=$CXX_STANDARD mu_use_our_cxxflags=yes ]) AC_MSG_NOTICE([Using ${C_STANDARD:-toolchain default} C standard]) AC_MSG_NOTICE([Using ${CXX_STANDARD:-toolchain default} C++ standard]) AC_PROG_CC([$CC_SEARCH]) AC_PROG_CPP AC_PROG_CXX([$CXX_SEARCH]) AC_PROG_CXXCPP dnl If we explicitly set the C/C++ standard to use, then ensure that is passed dnl when the preprocessor is run during the tests that follow. This is a bit dnl sketchy, because really this ought to be done as part of testing for how to dnl run the preprocessor above - and there are no separate variables for the dnl preprocessor flags for C and C++, the autoconf tests just use CPPFLAGS for dnl both, which is a bit difficult when we want to specify a C or C++ standard dnl to use in mixed code. If we don't do this though, then we can see dodgy or dnl misleading test results for things like AC_CHECK_HEADERS which runs both dnl the compiler and preprocessor as separate tests. If CFLAGS or CXXFLAGS set dnl a standard to use and the preprocessor flags do not, then the results could dnl be conflicting when things which do vary according to the standard that is dnl being used are involved. Fortunately, CPP and CXXCPP generally aren't used dnl very often outside of the feature tests here, and if there is a problem it dnl will probably shake out fairly early in the first test which does use it. 
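dnl As a hypothetical illustration of the mismatch being guarded against here
dnl (the header named is just an example): with CXXFLAGS carrying -std=gnu++98
dnl but CXXCPP left in its default mode, something like
dnl
dnl   AC_CHECK_HEADERS([unordered_map])
dnl
dnl could see its preprocessor pass and its compile pass disagree, leaving a
dnl cached result that does not match what the real build will later see.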
AS_IF([test -n "$C_STANDARD"],[CPP="$CPP $C_STANDARD"]) AS_IF([test -n "$CXX_STANDARD"],[CXXCPP="$CXXCPP $CXX_STANDARD"]) AC_PROG_LEX dnl AC_PROG_YACC dnl Do this instead, we want real bison usually and not the crippled yaccalike. AC_CHECK_TOOL([YACC],[bison],[:]) AC_PROG_RANLIB AC_CHECK_TOOL([AR],[ar],[:]) AC_CHECK_TOOL([WINDRES],[windres],[:]) AC_PROG_LN_S AC_PROG_INSTALL AC_CHECK_PROGS([LCOV],[lcov],[:]) AC_CHECK_PROGS([GENHTML],[genhtml],[:]) # Configure toolchain options. dnl These we always apply, even when the user explicitly set *FLAGS manually. AS_IF([test "$mu_cv_enable_pipe" = yes], [ACM_ADD_OPT([CFLAGS,CXXFLAGS],[-pipe])]) dnl And this one is a little special, because we have to pass it to the linker dnl as well, but we don't have separate linker flags for C and C++ which means dnl we'll always need to enable it for both together while that stays true. AS_IF([test "$mu_cv_enable_profiling" = yes], [ACM_ADD_OPT([CFLAGS,CXXFLAGS,LDFLAGS],[-pg])]) dnl Build the lists of common, C, and C++ compiler flags which we only use if dnl CFLAGS and CXXFLAGS were not explicitly overridden by the user. We don't dnl need to test for these, they should be supported by all toolchains we use. dnl It's the common options we are most interested in here, so that we do not dnl need to duplicate them and the enable logic for both CFLAGS and CXXFLAGS. mu_common_flags= mu_cflags= mu_cxxflags= AS_IF([test "$mu_cv_enable_optimisation" = yes], [ACM_ADD_OPT([mu_common_flags],[-O2])]) AS_IF([test "$mu_cv_enable_debug" = yes], [ACM_ADD_OPT([mu_common_flags],[-g])]) AS_IF([test "$mu_cv_enable_fail_on_warning" = yes], [ACM_ADD_OPT([mu_common_flags],[-Werror])]) dnl Always use -Wall unless *FLAGS is explicitly overridden. ACM_ADD_OPT([mu_common_flags],[-Wall]) dnl These are enabled by default unless --disable-extra_warnings was used. AS_IF([test "$mu_cv_enable_extra_warnings" = yes],[ ACM_ADD_OPT([mu_common_flags],[-Wextra, -Wpointer-arith, -Wcast-qual, -Wcast-align, -Wformat=2, -Wfloat-equal]) ACM_ADD_OPT([mu_cflags],[-Wstrict-prototypes, -Wmissing-prototypes]) ACM_ADD_OPT([mu_cxxflags],[-Woverloaded-virtual]) ]) dnl Set CFLAGS using the options from above, if the user didn't override it. AS_IF([test "$mu_use_our_cflags" = yes],[ ACM_ADD_OPT([CFLAGS],[$mu_common_flags,$mu_cflags]) ]) dnl Set CXXFLAGS using the options from above, if the user didn't override it. AS_IF([test "$mu_use_our_cxxflags" = yes],[ ACM_ADD_OPT([CXXFLAGS],[$mu_common_flags,$mu_cxxflags]) ]) dnl Nothing should need these after this, so let's get them out of the global dnl namespace again so as to be perfectly clear about that in the future. m4_foreach([var],[mu_common_flags,mu_cflags,mu_cxxflags],[ AS_UNSET([var])dnl ]) dnl This option is disabled by default and if enabled sets clang as the default dnl CC and CXX, so we still add these to *FLAGS even if the user supplied their dnl own preferred set. If they don't want these, they can just not enable them. AS_IF([test "$mu_cv_enable_clang_almost_everything" != no],[ dnl Options common to both clang and clang++ for C and C++. ACM_ADD_OPT([CFLAGS,CXXFLAGS],[-Weverything, -Wno-c99-extensions, -Wno-vla-extension, -Wno-vla, -Wno-gnu-zero-variadic-macro-arguments, -Wno-variadic-macros, -Wno-disabled-macro-expansion, -Wno-undef, -Wno-padded, -Wno-packed, -Wno-documentation-html, -Wno-documentation-unknown-command])dnl dnl (There are currently no) Extra C specific options. dnl ACM_ADD_OPT([CFLAGS],[]) dnl Extra C++ specific options.
ACM_ADD_OPT([CXXFLAGS],[-Wno-c++11-long-long, -Wno-exit-time-destructors, -Wno-global-constructors, -Wno-weak-vtables, -Wno-weak-template-vtables, -Wno-shadow])dnl dnl The above assumes a baseline of clang 3.5.0 as the minimum supported dnl version. We test for the options which were added in later versions. ACM_ADD_COMPILER_WARNING([C,CXX],[no-reserved-id-macro, no-format-pedantic, no-double-promotion])dnl ACM_ADD_COMPILER_WARNING([CXX],[no-shadow-field-in-constructor])dnl dnl The "override" keyword was added in C++11, so don't whine about it dnl not being used if we are building to an earlier standards version. dnl Clang itself should know this, but as of clang 11.0.1 appears not to. AS_CASE([$CXX_STANDARD], [*++98|*++03], [ACM_ADD_COMPILER_WARNING([CXX],[no-suggest-destructor-override, no-suggest-override])]dnl )dnl ]) dnl This option enables toolchain diagnostics which suggest ways that the code dnl can be changed or annotated to enable optimisations or safety checks that dnl are probably applicable, but which the compiler cannot determine with 100% dnl certainty that all the required conditions will always be met. dnl dnl We need to test if these extra warning options are actually supported by dnl the toolchain in use, we can't safely assume that they are with this lot. dnl The -Wsuggest-attribute options are currently GCC specific. AS_IF([test "$mu_cv_enable_code_suggestions" = yes],[ ACM_ADD_COMPILER_WARNING([C,CXX],[suggest-attribute=format, suggest-attribute=const, suggest-attribute=pure, suggest-attribute=noreturn, suggest-attribute=malloc, suggest-attribute=cold]) ]) dnl We need a custom template for this one. The alternative is running the dnl compiler/pre-processor to test for it, but that seems like overkill here. AH_VERBATIM([_FORTIFY_SOURCE], [/* Build with libc buffer overflow checks enabled. We need to guard this, because on some platforms the toolchain will already define it as a builtin, and then emit warnings if we redefine it. Ideally, we'd undefine it here and then force our choice of strictness, but we can't do that with autoheader because it sees that as a hook to rewrite. So just let people (yes, we're looking at you Gentoo) reap what they've sown if the toolchain or the environment they use has already defined it. */ #ifndef _FORTIFY_SOURCE # undef _FORTIFY_SOURCE #endif]) AS_IF([test "$mu_cv_enable_fortify_source" != no],[ AC_DEFINE_UNQUOTED([_FORTIFY_SOURCE],[$mu_cv_enable_fortify_source]) ]) dnl We use the case here as a portable test for if this was set to a numeric dnl value, for use with the older stack protector options, or a string value dnl to use one of the newer ones. It's split between two separate case tests dnl so we can fall back to the older method if the toolchain does not support dnl the newer options. We may want to special-case that further for some dnl options like 'explicit' which aren't exactly a superset of that, but for dnl now this gives us reasonable fallback behaviour for a default of 'strong'. dnl dnl We need to include the -fstack-protector option in both compiler and linker dnl flags so that libssp will be linked in correctly on platforms where it is dnl needed because the functions it provides are not integrated with libc.
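dnl Roughly, the two case tests below end up mapping (illustrative values):
dnl
dnl   --enable-stack_protector / =strong  ->  -fstack-protector-strong
dnl   --enable-stack_protector=all        ->  -fstack-protector-all
dnl   --enable-stack_protector=8          ->  -fstack-protector --param ssp-buffer-size=8
dnl
dnl and if the toolchain rejects the newer named option, the value is reset to
dnl the numeric 4 so the second test falls back to the old ssp-buffer-size form.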
AS_CASE([$mu_cv_enable_stack_protector], [no],dnl Option is disabled [], [[''|*[!0-9]*]],dnl Value is not numeric [ACM_ADD_COMPILE_LINK_OPTION([C,CXX],[-fstack-protector-$mu_cv_enable_stack_protector]) AS_VAR_PUSHDEF([spvar],[mu_cv_ldflag_-fstack-protector-$mu_cv_enable_stack_protector]) AS_VAR_IF([spvar],[yes],[],[mu_cv_enable_stack_protector=4]) AS_VAR_POPDEF([spvar]) ] ) AS_CASE([$mu_cv_enable_stack_protector], [[''|*[!0-9]*]],dnl Value is not numeric [], [*],dnl Value is numeric [ACM_ADD_COMPILE_LINK_OPTION([C,CXX], [-fstack-protector --param ssp-buffer-size=$mu_cv_enable_stack_protector]) ] ) dnl Testing for these is a bit awkward, because unknown ld -z keywords will be dnl "ignored for Solaris compatibility", but we do still need to test for them dnl because the mingw linker at least does not support the -z option ... AS_IF([test "$mu_cv_enable_relro" = yes],[ ACM_ADD_LINKER_OPTION([[-Wl,-z,relro]]) ]) AS_IF([test "$mu_cv_enable_bind_now" = yes],[ ACM_ADD_LINKER_OPTION([[-Wl,-z,now]]) ]) dnl Add the needed toolchain options for any requested runtime sanitisers. AS_IF([test "$mu_cv_enable_san" != no],[ACM_ADD_SANITIZER([$mu_cv_enable_san])]) dnl Add any platform specific toolchain flags that are generally needed. case $host in *-*-freebsd* ) AC_LANG_PUSH([C++]) dnl On FreeBSD 11, both gcc6 and gcc7 will miscompile code when the dnl -fguess-branch-probability optimisation is enabled (which it is dnl with anything above -O0). We don't currently have a trivial test dnl case for that which we can use here, but the symptom is having an dnl exception which should normally be safely caught, instead invoke dnl terminate and kill the application. It would appear that the dnl stack context for unwinding is being lost in some code paths by dnl this optimisation, since an exception thrown in one path will be dnl fine, but one right next to it in another will explode. dnl dnl So until we have some proof of it being fixed, disable that when dnl using g++. We can't actually directly test if we are using g++, dnl because clang lies and defines all of gcc's macros, so instead dnl we can only test if we are not using clang (which would choke on dnl this test anyway, since it doesn't currently support that flag dnl in any case. ACM_PUSH_VAL([$0],[CXXFLAGS],[-fno-guess-branch-probability])dnl AC_CACHE_CHECK([if $CXX needs -fno-guess-branch-probability], [mu_cv_flag_guess_branch_probability], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[ #ifdef __clang__ using the clang compiler #endif ]] )], [mu_cv_flag_guess_branch_probability=yes], [mu_cv_flag_guess_branch_probability=no] ) ]) AS_IF([test "$mu_cv_flag_guess_branch_probability" = yes], [], [ACM_POP_VAR([$0],[CXXFLAGS])])dnl dnl And as above -freorder-blocks can cause the same symptom to manifest dnl on FreeBSD 11 with both gcc6 and gcc7, just in different places in dnl the code. ACM_PUSH_VAL([$0],[CXXFLAGS],[-fno-reorder-blocks])dnl AC_CACHE_CHECK([if $CXX needs -fno-reorder-blocks], [mu_cv_flag_reorder_blocks], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[ #ifdef __clang__ using the clang compiler #endif ]] )], [mu_cv_flag_reorder_blocks=yes], [mu_cv_flag_reorder_blocks=no] ) ]) AS_IF([test "$mu_cv_flag_reorder_blocks" = yes],[],[ACM_POP_VAR([$0],[CXXFLAGS])])dnl dnl And yet one more, that does it in yet another place. dnl If these continue to shake out, it might be safer to just build dnl with -O0 on FreeBSD 11 with gcc ... 
ACM_PUSH_VAL([$0],[CXXFLAGS],[-fno-tree-dominator-opts])dnl AC_CACHE_CHECK([if $CXX needs -fno-tree-dominator-opts], [mu_cv_flag_tree_dominator_opts], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[ #ifdef __clang__ using the clang compiler #endif ]] )], [mu_cv_flag_tree_dominator_opts=yes], [mu_cv_flag_tree_dominator_opts=no] ) ]) AS_IF([test "$mu_cv_flag_tree_dominator_opts" = yes], [], [ACM_POP_VAR([$0],[CXXFLAGS])])dnl AC_LANG_POP([C++]) ;; esac PTHREAD_CPPFLAGS="-pthread" PTHREAD_LDFLAGS="-pthread" ACM_PUSH_VAL([$0],[CPPFLAGS],[$PTHREAD_CPPFLAGS])dnl AC_CACHE_CHECK([if _REENTRANT is defined by the compiler], [mu_cv_have_reentrant], [AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[ #ifndef _REENTRANT #error "_REENTRANT was not defined" #endif ]]) ], [mu_cv_have_reentrant=yes], [mu_cv_have_reentrant=no] )] ) ACM_POP_VAR([$0],[CPPFLAGS])dnl AS_IF([test "$mu_cv_have_reentrant" != yes],[ ACM_ADD_OPT([PTHREAD_CPPFLAGS],[-D_REENTRANT]) ]) dnl add 's' here and omit ranlib from the build step ARFLAGS=rDvs dnl bison3 complains loudly about a bunch of constructs that must still be used dnl if compatibility with bison2 is required, and appears to give us no clean dnl way to deal with that at all. We can tell bison3 not to bark by passing it dnl the -Wno-deprecated option, except bison2 chokes and dies on that too ... dnl Disabling those warnings is sub-optimal, but so is scaring end-users with dnl them and/or maintaining two sets of grammar files, or autogenerating them dnl just for a couple of gratuitously renamed defines. So the least ugly option dnl we have appears to be to test for bison3 here and disable those warnings if dnl it's the version in use, unless they were enabled explicitly by the user dnl who ran configure. At least for the next few years while most of the world dnl is still using bison2 ... AS_IF([test "$mu_cv_enable_bison_deprecated_warnings" = no],[ AS_IF([test "$YACC" != ":"],[ AS_IF([$YACC -Wno-deprecated -V > /dev/null 2>&1],[ mu_yacc_flags=" -Wno-deprecated" AC_MSG_NOTICE([disabled bison3 deprecation warnings]) ]) ]) ]) YACCFLAGS="-d$mu_yacc_flags \$(EXTRAYACCFLAGS)" LEXFLAGS="\$(EXTRALEXFLAGS)" dnl This one's work here is done now too, we don't need it elsewhere. AS_UNSET([mu_yacc_flags]) dnl Macros to define in the private config header. AC_DEFINE_UNQUOTED([EMDEBUG], [$(test "$mu_cv_enable_debug" != yes)$?], [build with additional debugging code]) AC_DEFINE_UNQUOTED([EM_USE_VALGRIND_FRIENDLY], [$(test "$mu_cv_enable_valgrind_friendly" != yes)$?], [do extra cleanup to be valgrind clean]) AC_DEFINE_UNQUOTED([EM_SYSTEM_RUNDIR],["$SYSTEM_RUNDIR"], [System directory for run-time variable data]) dnl ------- End: configure.stdtools ----- dnl ----- Begin: configure.i18n ----- dnl Makeup i18n configure boilerplate. dnl dnl Copyright 2003 - 2021, Ron dnl dnl This file is distributed under the terms of the GNU GPL version 2. dnl dnl As a special exception to the GPL, it may be distributed without dnl modification as a part of a program using a makeup generated build dnl system, under the same distribution terms as the program itself. dnl Internal string encoding. dnl XXX Expand this to specify the actual encoding to use. dnl eg. WCHAR_T, utf8, iso88591 etc. 
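dnl A note on the "$(test ... != yes)$?" construct used for the
dnl AC_DEFINE_UNQUOTED values here (EMDEBUG and EM_USE_VALGRIND_FRIENDLY above,
dnl EM_USE_WIDE_STRINGS and friends below): the command substitution expands to
dnl nothing and $? then picks up its exit status, so for example:
dnl
dnl   mu_cv_enable_wide_strings=yes  ->  test fails     ->  EM_USE_WIDE_STRINGS is 1
dnl   mu_cv_enable_wide_strings=no   ->  test succeeds  ->  EM_USE_WIDE_STRINGS is 0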
AC_ARG_ENABLE([wide_strings], [AS_HELP_STRING([--enable-wide_strings], [use wide characters by default for internal]dnl [ string storage (default NO)])], [mu_cv_enable_wide_strings=$enableval], [mu_cv_enable_wide_strings=no]) AC_DEFINE_UNQUOTED([EM_USE_WIDE_STRINGS], [$(test "$mu_cv_enable_wide_strings" != yes)$?], [use wide characters by default for internal string storage]) AS_IF([test "$mu_cv_enable_wide_strings" = yes],[ makeup_build_flavour="${makeup_build_flavour}w" case $host in *-*-cygwin* | *-*-mingw32* ) AC_DEFINE([UNICODE],[1],[Enable Windows in 'UNICODE' mode]) AC_MSG_NOTICE([using UNICODE]) ;; esac ]) dnl On FreeBSD we need xlocale.h for strtod_l. That header is available on dnl Linux (though we shouldn't normally include it directly), but is not dnl available for mingw, and possibly some other platforms too (since it is dnl not a standard header at this time). AC_CHECK_HEADERS([xlocale.h]) dnl Windows has its own incarnations of newlocale/strtod_l, and some platforms dnl like OpenBSD (as of 6.1) don't implement the POSIX newlocale at all, let dnl alone the xlocale extensions which use it. All we can really do for those dnl is fall back to using strtod, and if the user is in a locale where the dnl decimal point is a comma or something similar, they get to keep the pieces dnl or will need to override their default locale for running this. AC_CHECK_FUNCS([newlocale]) dnl The _create_locale function needs a later C runtime than the default. AS_IF([test "$ac_cv_func_newlocale" = yes],[], [ACM_PUSH_VAR([$0],[LIBS])dnl ACM_ADD_LIBS([LIBS],[-lmsvcr110]) AC_CHECK_FUNCS([_create_locale]) ACM_POP_VAR([$0],[LIBS])dnl ]) AC_CHECK_FUNCS([strtod_l _strtod_l], [break]) AS_IF([test "$ac_cv_func_strtod_l" = yes],[], [test "$ac_cv_func__strtod_l" = yes],[], [AC_MSG_WARN([No localisable strtod available on this system.]) AC_MSG_WARN([Binaries will need to be run in the "C" locale.])]) dnl String encoding conversion. AC_ARG_WITH([iconv], [AS_HELP_STRING([--with-iconv], [use iconv (from glibc or libiconv) for string encoding]dnl [ conversion (default YES)])], [mu_cv_with_iconv=$withval], [mu_cv_with_iconv=yes]) dnl Localised string substitution. AC_ARG_WITH([gettext], [AS_HELP_STRING([--with-gettext], [use gettext (from glibc or libintl) to localise selected]dnl [ literal strings (default YES)])], [mu_cv_with_gettext=$withval], [mu_cv_with_gettext=yes]) dnl Some of these are covered by AM_GNU_GETTEXT now. dnl AC_ARG_VAR([XGETTEXT],[xgettext command]) dnl AC_ARG_VAR([MSGMERGE],[msgmerge command]) dnl AC_ARG_VAR([MSGFMT],[msgfmt command]) dnl A couple still are not. AC_ARG_VAR([XGETTEXT_ARGS],[xgettext arguments]) AC_ARG_VAR([MSGINIT],[msginit command]) AC_ARG_VAR([ALL_LINGUAS],[The list of supported ISO 639 language codes]) AC_ARG_VAR([GETTEXT_MSG_SRC],[Limit the search for messages to $(GETTEXT_MSG_SRC)/]) dnl This one is needed for the AC_LIB_RPATH macro, required by AM_GNU_GETTEXT. FIND_AND_LINK_IF_LOCAL([config.rpath],[${ac_aux_dir#$srcdir/}],[/usr/share/gettext]) dnl This is needed because AM_ICONV_LINK, which can be pulled in by either AM_ICONV dnl or by AM_GNU_GETTEXT, currently leaks memory when running some test code, which dnl means the check for "working iconv" will wrongly fail if LSan is active. We do dnl need to wrap a much larger scope with this than would be ideal, because we can't dnl know exactly where the offending test will be expanded and run.
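dnl So the suppression below deliberately brackets the whole i18n block, i.e.
dnl roughly (a sketch only):
dnl
dnl   ACM_SUPPRESS_LSAN([iconv])
dnl     ... AM_GNU_GETTEXT and AM_ICONV checks ...
dnl   ACM_RESTORE_LSAN([iconv])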
ACM_SUPPRESS_LSAN([iconv]) dnl We check this before iconv, as it _may_ do some iconv tests dnl that we will not need to repeat, or even hit the cache for. AS_IF([test "$mu_cv_with_gettext" != no],[ dnl FIXME: AM_GNU_GETTEXT pollutes CPPFLAGS with the default path dnl for mingw-cross builds... so for now: ACM_PUSH_VAR([$0],[CPPFLAGS])dnl dnl imported from /usr/share/aclocal/gettext.m4 AM_GNU_GETTEXT([external],[need-ngettext]) ACM_POP_VAR([$0],[CPPFLAGS])dnl dnl These are set by AM_GNU_GETTEXT above now. dnl AC_CHECK_TOOL(XGETTEXT, xgettext) dnl AC_CHECK_TOOL(MSGMERGE, msgmerge) dnl AC_CHECK_TOOL(MSGFMT, msgfmt) dnl This is done implicitly by linking to a built test. dnl AC_CHECK_HEADERS(libintl.h) dnl Making this test useless. dnl if test "$ac_cv_header_libintl_h" != yes; then dnl and we must do this obscenity instead AS_IF([test "$gt_cv_func_gnugettext2_libc" != yes && test "$gt_cv_func_gnugettext2_libintl" != yes],[ AC_MSG_WARN([gettext not supported on this platform. disabling.]) mu_cv_with_gettext=no ],[ dnl These we must still do for ourself. AC_CHECK_TOOL([MSGINIT],[msginit],[:]) XGETTEXT_ARGS="-C -k_ -kP_:1,2 -s" ACM_ADD_LIBS([I18N_LIBS],[\$(LIBINTL)]) dnl Do we really want to always add this unconditionally here? ACM_ADD_LIBS([LIBS],[$LIBINTL]) ]) ]) AC_DEFINE_UNQUOTED([EM_USE_GETTEXT], [$(test "$mu_cv_with_gettext" != yes)$?], [use gettext to localise selected literal strings]) dnl String encoding conversion. AS_IF([test "$mu_cv_with_iconv" != no],[ dnl FIXME: AM_ICONV_LINK pollutes CPPFLAGS with the default path dnl for mingw-cross builds... so for now: ACM_PUSH_VAR([$0],[CPPFLAGS])dnl dnl imported from /usr/share/aclocal/iconv.m4 AM_ICONV ACM_POP_VAR([$0],[CPPFLAGS])dnl dnl and a couple of useful things it does not do. AS_IF([test "$am_cv_func_iconv" = yes],[ case $host in *-*-cygwin* | *-*-mingw32* ) mu_path_iconv=iconv ;; * ) AC_PATH_PROG([mu_path_iconv],[iconv]) ;; esac AS_IF([test -n "$mu_path_iconv"],[ AC_DEFINE_UNQUOTED([ICONV_UTIL_PATH], ["$mu_path_iconv"], [Define this with the path to the iconv utility]) ]) AC_DEFINE_UNQUOTED([HAVE_ICONV_CONST], [$(test "$am_cv_proto_iconv_arg1" != const)$?], [The system iconv requires a const char** second argument]) ACM_ADD_LIBS([I18N_LIBS],[\$(LIBICONV)]) dnl Do we really want to always add this unconditionally too? ACM_ADD_LIBS([LIBS],[$LIBICONV]) ]) ]) ACM_RESTORE_LSAN([iconv]) dnl ------- End: configure.i18n ----- dnl ----- Begin: configure.udev ----- dnl Makeup configure boilerplate. dnl dnl Copyright 2010 - 2021, Ron dnl dnl This file is distributed under the terms of the GNU GPL version 2. dnl dnl As a special exception to the GPL, it may be distributed without dnl modification as a part of a program using a makeup generated build dnl system, under the same distribution terms as the program itself. dnl There are platforms where we know udev won't be present, so don't bother dnl testing for it at all there unless someone explicitly passes --with-udev. mu_expect_udev=no case $host in *-*-linux* ) mu_expect_udev=yes ;; esac dnl Option, check, and substvars for libudev. dnl --with-udev="rules only" is handled as a special case where libudev support dnl is not needed or wanted, but UDEV_RULES_DIR is for installing rules files. AC_ARG_WITH([udev], [AS_HELP_STRING([--with-udev], [use libudev for device detection (default yes on Linux, else no)])], [mu_cv_with_udev=$withval], [mu_cv_with_udev=$mu_expect_udev]) dnl Since udev merged with systemd, upstream claims to no longer support dnl static linking. 
In practice this means Wheezy was the last release dnl to ship with libudev.a, but even there support for it is crappy - as dnl it uses clock_gettime but libudev.pc does not declare the dependency dnl on librt required for that before it was moved to libc (for Jessie). dnl So now that even Wheezy-LTS is officially EOL, there's not much point dnl in us working around its brokenness, all we can do is warn users that dnl they can either have libudev or (maybe) static linking (if nothing dnl else required is also broken for that, which is far from assured in dnl these Modern Times We Live In). AS_IF([test "$mu_cv_with_udev" != no],[ AS_IF([test "$mu_cv_enable_shared" = no],[ AC_MSG_WARN([libudev does not support static linking]) mu_cv_with_udev="rules only" ], [test "$mu_cv_with_udev" != "rules only"],[ AC_CHECK_LIB([udev],[udev_new], [ AC_DEFINE([HAVE_LIBUDEV],[1],[libudev is available]) UDEV_CPPFLAGS= ACM_PKG_CONFIG_GET_LIBS([UDEV_LIBS],[libudev],[udev]) mu_cv_with_udev=yes dnl The udev_device_get_tags_list_entry function was added in udev 154 dnl and udev_device_get_sysattr_list_entry was added in udev release 167, dnl which is newer than some current distro releases are shipping still. dnl RHEL/CentOS 6 at least appears to ship udev 147 and it isn't EOL yet. ACM_PUSH_VAL([$0],[LIBS],[-l$UDEV_LIBS])dnl AC_CHECK_FUNCS([udev_device_get_tags_list_entry udev_device_get_sysattr_list_entry]) ACM_POP_VAR([$0],[LIBS])dnl ],[ AS_IF([test "$mu_cv_with_udev" != auto],[ AC_MSG_WARN([libudev not found]) ACM_ADD_MISSING_DEP([libudev-dev (or configure --without-udev)]) ]) mu_cv_with_udev=no ]) ]) ]) dnl The udev.pc only defines the udevdir root, so we still need to construct dnl the rules directory from that ourselves either way. Don't bother to set dnl UDEV_RULES_DIR if we're building --without-udev. It being empty provides dnl a signal to not try and install any rules in that case too. dnl dnl There's some additional hackery we need to do here, because as with the dnl rest of systemd, the udev.pc doesn't respect any alternative prefix, it dnl simply hardcodes what systemd/udevd themselves were built with. In the dnl case of udev it is even worse though, because it won't search alternative dnl directories to what it was built with. At present, rules are only ever dnl read from /etc, /run, or whatever UDEVLIBEXECDIR was set to when udev was dnl built (typically either /lib or /usr/lib). dnl dnl So right now, the least horrid thing we can do is, if we are building with dnl --prefix=/usr, then we install rules to the vendor rules dir reported by dnl udev.pc, otherwise we install them to the /etc/udev/rules.d directory. dnl If users need or want something different to that then they can explicitly dnl override the UDEV_DIR variable. For --prefix=/usr/local, we can't usefully dnl install rules to $libdir/udev, because udev quite simply won't look there. dnl dnl The fallback for ACM_PKG_CONFIG_GET_VAR here should probably instead be dnl $EXP_LIBDIR/udev, but on systems with "split usr" the vendor rules are all dnl installed in /lib, and on those with "merged usr" both /lib and /usr/lib dnl point to the same place anyway, so it's probably the safest path to default dnl to here for now if pkg-config support is not available. Especially since dnl in the "split" case, udevd will only look in one of those locations. This dnl one in particular is even more awful than the usual systemd-grade mess is.
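dnl To sketch the outcome of the test below (typical values only, the real
dnl vendor directory comes from the local udev.pc, usually via something like
dnl `pkg-config --variable=udevdir udev`):
dnl
dnl   --prefix=/usr      ->  UDEV_RULES_DIR = <udevdir>/rules.d
dnl                          (commonly /lib/udev/rules.d or /usr/lib/udev/rules.d)
dnl   any other prefix   ->  UDEV_RULES_DIR = /etc/udev/rules.d
dnl   --without-udev     ->  UDEV_RULES_DIR left empty, so no rules are installed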
AS_IF([test "$mu_cv_with_udev" != no && test -z "$UDEV_RULES_DIR"],[ ACM_IF_VENDOR_BUILD([ACM_PKG_CONFIG_GET_VAR([UDEV_DIR],[udev],[udevdir],[/lib/udev])], [AC_MSG_CHECKING([for UDEV_DIR]) UDEV_DIR="/etc/udev" AC_MSG_RESULT([$UDEV_DIR (for prefix=$EXP_PREFIX)]) ]) UDEV_RULES_DIR="$UDEV_DIR/rules.d" ]) dnl We don't usually expect people to mess with these two. AC_SUBST([UDEV_CPPFLAGS]) AC_SUBST([UDEV_LIBS]) dnl But this one they might want to override in some cases. AC_ARG_VAR([UDEV_RULES_DIR], [where to install udev rules]) dnl ------- End: configure.udev ----- dnl ----- Begin: configure.systemd ----- dnl Makeup configure boilerplate. dnl dnl Copyright 2018, Ron dnl dnl This file is distributed under the terms of the GNU GPL version 2. dnl dnl As a special exception to the GPL, it may be distributed without dnl modification as a part of a program using a makeup generated build dnl system, under the same distribution terms as the program itself. dnl There are platforms where we know systemd won't be present, so don't annoy dnl their users with its cruft unless we're explicitly passed --enable-systemd. mu_expect_systemd=no case $host in *-*-linux* ) mu_expect_systemd=yes ;; esac dnl Option and substvar for installing systemd units. AC_ARG_ENABLE([systemd], [AS_HELP_STRING([--enable-systemd], [install systemd unit files (default yes on Linux, else no)])], [mu_cv_enable_systemd=$enableval], [mu_cv_enable_systemd=$mu_expect_systemd]) dnl Don't bother setting SYSTEMD_UNIT_DIR if we're using --disable-systemd. dnl It being empty provides a signal to not try and install any units in that dnl case too. dnl dnl There's some additional hackery we need to do here, because systemd has a dnl deep assumption of everything being installed into vendor space and the dnl systemd.pc it creates doesn't respect an alternative $prefix, it dnl just hardcodes the one which systemd itself was built with. dnl dnl So right now, the least horrid thing we can do is, if we are building with dnl --prefix=/usr, then we install units to the vendor unit dir reported by dnl systemd.pc, otherwise we install them to $libdir/systemd/system. If users dnl need or want something different to that then they can explicitly override dnl the SYSTEMD_UNIT_DIR variable. This should work ok for --prefix=/usr/local, dnl because systemd will search there too, but it won't work by default for any dnl other oddball values of $prefix. We could default to systemdsystemconfdir dnl (aka /etc/systemd/system) for the oddball case, but if people are using an dnl odd prefix, then they probably have their own plan for mapping that to dnl locations which will actually be searched, so for now at least, we will dnl respect that too. dnl dnl The fallback for ACM_PKG_CONFIG_GET_VAR here should probably instead be dnl $EXP_LIBDIR/systemd/system too, but on systems with "split usr" the vendor dnl units are installed in /lib, and on those with "merged usr" both /lib and dnl /usr/lib point to the same place anyway, so it's probably the safest path dnl to default to here for now if pkg-config support is not available.
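dnl A rough sketch of what the test below ends up choosing (typical values,
dnl the vendor path is whatever the local systemd.pc reports for
dnl systemdsystemunitdir):
dnl
dnl   --prefix=/usr       ->  SYSTEMD_UNIT_DIR = the systemd.pc vendor unit dir
dnl                           (normally /lib/systemd/system or /usr/lib/systemd/system)
dnl   any other prefix    ->  SYSTEMD_UNIT_DIR = $libdir/systemd/system
dnl   --disable-systemd   ->  SYSTEMD_UNIT_DIR left empty, so no units are installed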
AS_IF([test "$mu_cv_enable_systemd" = yes],[ ACM_IF_VENDOR_BUILD([ACM_PKG_CONFIG_GET_VAR([SYSTEMD_UNIT_DIR],[systemd], [systemdsystemunitdir],[/lib/systemd/system])], [AC_MSG_CHECKING([for SYSTEMD_UNIT_DIR]) SYSTEMD_UNIT_DIR="$EXP_LIBDIR/systemd/system" AC_MSG_RESULT([$SYSTEMD_UNIT_DIR (for prefix=$EXP_PREFIX)]) ]) ]) AC_ARG_VAR([SYSTEMD_UNIT_DIR], [where to install systemd units]) AC_DEFINE_UNQUOTED([EM_USE_NOTIFY_SOCKET], [$(test "$mu_cv_enable_systemd" != yes)$?], [Build with service manager NOTIFY_SOCKET support]) dnl ------- End: configure.systemd ----- dnl ----- Begin: configure.sysctl ----- dnl Makeup configure boilerplate. dnl dnl Copyright 2018, Ron dnl dnl This file is distributed under the terms of the GNU GPL version 2. dnl dnl As a special exception to the GPL, it may be distributed without dnl modification as a part of a program using a makeup generated build dnl system, under the same distribution terms as the program itself. dnl Only enable this by default on platforms where we expect sysctl.d to be dnl present. People can still --enable-sysctl to override that if needed. mu_expect_sysctl=no case $host in *-*-linux* ) mu_expect_sysctl=yes ;; esac dnl Option and substvar for installing sysctl snippets. AC_ARG_ENABLE([sysctl], [AS_HELP_STRING([--enable-sysctl], [install sysctl configuration files (default yes on Linux, else no)])], [mu_cv_enable_sysctl=$enableval], [mu_cv_enable_sysctl=$mu_expect_sysctl]) dnl Don't bother setting SYSCTL_DIR if we're using --disable-sysctl. It being dnl empty provides a signal to not try and install any files in that case too. dnl dnl The systemd.pc provides what it expects to be the system path for these, dnl but there's some additional hackery we need to do because systemd assumes dnl everything will be installed into the vendor space and doesn't respect an dnl alternate $prefix for local builds, hardcoding only the one which systemd dnl itself was built with. dnl dnl So right now, the least horrid thing we can do is, if we are building with dnl --prefix=/usr, then we install them to the vendor sysctl dir reported by dnl systemd.pc, otherwise we install them to $libdir/sysctl.d. If users need dnl or want something different to that, then they can override the SYSCTL_DIR dnl variable explicitly. This should work ok for --prefix=/usr/local, because dnl both systemd-sysctl and sysctl(8) will search there too, but it won't work dnl by default for other oddball values of $prefix. We could default to using dnl /etc/sysctl.d for the oddball case, but if people are using an odd prefix, dnl then they probably have their own plan for mapping that to locations which dnl will actually be searched, so for now at least, we'll respect that too. AS_IF([test "$mu_cv_enable_sysctl" = yes],[ ACM_IF_VENDOR_BUILD([ACM_PKG_CONFIG_GET_VAR([SYSCTL_DIR],[systemd], [sysctldir],[$EXP_LIBDIR/sysctl.d])], [AC_MSG_CHECKING([for SYSCTL_DIR]) SYSCTL_DIR="$EXP_LIBDIR/sysctl.d" AC_MSG_RESULT([$SYSCTL_DIR (for prefix=$EXP_PREFIX)]) ]) ]) AC_ARG_VAR([SYSCTL_DIR], [where to install sysctl configuration]) dnl ------- End: configure.sysctl ----- dnl ----- Begin: ./Makeup/config/configure.bit-babbler ----- dnl Makeup extra configuration for bit-babbler. dnl dnl Copyright 2003 - 2021, Ron Lee. dnl AC_LANG_PUSH([C++]) case $host in *-*-linux* ) ;; *-*-cygwin* | *-*-mingw32* ) dnl We don't have unix domain sockets on windows, so default to TCP there. 
AS_IF([test -z "$SEEDD_CONTROL_SOCKET"], [SEEDD_CONTROL_SOCKET=tcp:localhost:56789]) dnl We need at least 0x0600 to get AI_ADDRCONFIG for getaddrinfo bb_cv_env_winver=0x0600 bb_cv_env__win32_winnt=0x0600 AC_DEFINE_UNQUOTED([WINVER], [$bb_cv_env_winver], [Select the MSW version to be compatible with]) AC_DEFINE_UNQUOTED([_WIN32_WINNT], [$bb_cv_env__win32_winnt], [The MSW NT version to be compatible with]) AC_MSG_NOTICE([using WINVER = '$bb_cv_env_winver', _WIN32_WINNT = '$bb_cv_env__win32_winnt']) dnl Testing for vasprintf has the opposite problem to what localtime_r does dnl as described below. It's included in the system library, so a link test dnl passes with a faked prototype, but an actual build fails because it is dnl not visible in stdio.h unless _GNU_SOURCE is defined. AC_DEFINE([_GNU_SOURCE],[1],[Include support for vasprintf et al.]) ;; *-*-openbsd* ) dnl The default pthread stack size on OpenBSD 6.1 is 512kB, so fix that. AS_IF([test -z "$THREAD_STACK_SIZE"],[THREAD_STACK_SIZE=8192]) AC_DEFINE([HAVE_BROKEN_STDIO_LOCKING],[1], [Workaround OpenBSD _thread_flockfile cancellation bug]) ;; *-*-freebsd* ) dnl The default pthread stack size on FreeBSD 11 is 2MB, so fix that. dnl So far we haven't actually had this smash the stack there with dnl the default size (unlike OpenBSD, MacOS and Windows), but let's dnl not wait until we do, just use the same size as everywhere else. AS_IF([test -z "$THREAD_STACK_SIZE"],[THREAD_STACK_SIZE=8192]) ;; *-*-darwin* ) dnl The default pthread stack size on MacOS is only 512kB, and we expect to dnl need more than that, so bring it into line with the normal Linux default. AS_IF([test -z "$THREAD_STACK_SIZE"],[THREAD_STACK_SIZE=8192]) ;; esac dnl /var could be a remote mount which isn't available at early boot when seedd dnl is first started, but /run is supposed to be ready before any ordinary early dnl boot process, even if it is a separate mount like a tmpfs, so default to it dnl unless we know it's not expected to be supported. FHS 3.0 allows /var/run dnl to be an alias to /run, and that is what most (but not all) Linux distros dnl currently do. The BSDs (aside from Debian's kFreeBSD port) aren't riding dnl this train yet though, so we still use /var/run there instead of rudely dnl creating a new directory in the root of people's systems. Aside from the dnl override for Windows above, we use SYSTEM_RUNDIR from configure.stdtools to dnl decide which to use here. AS_IF([test -z "$SEEDD_CONTROL_SOCKET"], [SEEDD_CONTROL_SOCKET="$SYSTEM_RUNDIR/bit-babbler/seedd.socket"]) AC_ARG_VAR([SEEDD_CONTROL_SOCKET], [Set the default to use for the seedd control socket]) AS_IF([test -n "$SEEDD_CONTROL_SOCKET"],[ AC_DEFINE_UNQUOTED([SEEDD_CONTROL_SOCKET],["$SEEDD_CONTROL_SOCKET"], [Set the default to use for the seedd control socket]) ]) AC_ARG_VAR([THREAD_STACK_SIZE], [Explicitly set the per-thread stack size in kB (if non-zero)]) AS_IF([test -n "$THREAD_STACK_SIZE"],[ AC_DEFINE_UNQUOTED([THREAD_STACK_SIZE],[$THREAD_STACK_SIZE], [Explicitly set the per-thread stack size in kB (if non-zero)]) ]) AC_C_BIGENDIAN AC_CHECK_FUNCS([vasprintf]) dnl See if clock_gettime is available even without librt. On some systems dnl it is, and as of glibc 2.17 the clock_* functions moved from librt to dnl the main C library as well. We don't particularly want to depend upon dnl librt, for lots of reasons, but using this if we do have it is sane. 
dnl dnl Testing for localtime_r and gmtime_r in this way fails on mingw-w64 4.9.2 dnl even though it does actually have them when either of _POSIX_C_SOURCE or dnl _POSIX_THREAD_SAFE_FUNCTIONS are defined - because AC_CHECK_FUNCS tries to dnl link a faked prototype, but in the mingw time.h they are inline functions dnl only, wrapping the system localtime_s function. We could do a more complex dnl test here, but it's probably ok to just let them fall back to using the dnl non-reentrant versions, because on MSW, localtime and gmtime are in theory dnl implemented using thread-local storage, so they are thread-safe anyway. AC_CHECK_FUNCS([gettimeofday localtime_r gmtime_r timegm clock_gettime]) dnl Check if SIGRTMIN is available. MacOS 10.12.1 still doesn't have it dnl (though FreeBSD added support for it in version 7). AC_CHECK_DECLS([SIGRTMIN],[],[],[[#include <signal.h>]]) dnl OpenBSD as of at least 6.1 doesn't provide this (though FreeBSD and MacOS dnl at least do) and it isn't required by POSIX.1-2008 (SuSv4 TC2 2016). AC_CHECK_DECLS([LOG_MAKEPRI],[],[],[[#include <syslog.h>]]) ACM_CXX_FORCED_UNWIND ACM_FUNC_PTHREAD_SETNAME ACM_TR1_UNORDERED_MAP ACM_CPP_PUSH_POP_DIAGNOSTIC_MACROS ACM_DEFINE_FUNCTION_ATTRIBUTE([noreturn],[BB_NORETURN]) ACM_DEFINE_FUNCTION_ATTRIBUTE([const], [BB_CONST]) ACM_DEFINE_FUNCTION_ATTRIBUTE([pure], [BB_PURE]) ACM_DEFINE_FUNCTION_ATTRIBUTE([cold], [BB_COLD]) ACM_DEFINE_FUNCTION_ATTRIBUTE([no_sanitize("float-divide-by-zero")], [BB_NO_SANITIZE_FLOAT_DIVIDE_BY_ZERO]) ACM_DEFINE_FUNCTION_ATTRIBUTE([no_sanitize("unsigned-integer-overflow")], [BB_NO_SANITIZE_UNSIGNED_INTEGER_OVERFLOW]) ACM_DEFINE_PRINTF_FORMAT_ATTRIBUTE([BB_PRINTF_FORMAT]) ACM_DEFINE_STRFTIME_FORMAT_ATTRIBUTE([BB_STRFTIME_FORMAT]) ACM_DEFINE_STATEMENT_ATTRIBUTE([fallthrough],[BB_FALLTHROUGH],[],[],[do {} while(0)]) dnl The FreeBSD libusb3 provides compatibility for libusb-0.1, libusb-1.0 and dnl their own libusb2 interface, with libusb.so as the -dev link to it. dnl On Linux libusb.so usually points to libusb-0.1, which we don't want. dnl The header file is usually in $prefix/include/libusb-1.0 on linux, and in dnl the system include dir on FreeBSD. AC_ARG_VAR([LIBUSB_DIR], [Path for libusb (mostly for cross-compiling)]) AC_ARG_VAR([USB_CPPFLAGS], [Extra CPPFLAGS for libusb (mostly for cross-compiling)]) AC_ARG_VAR([USB_LDFLAGS], [Extra LDFLAGS for libusb (mostly for cross-compiling)]) AS_IF([test -n "$LIBUSB_DIR"],[ USB_CPPFLAGS="-I$LIBUSB_DIR/include $USB_CPPFLAGS" USB_LDFLAGS="-L$LIBUSB_DIR/lib $USB_LDFLAGS" ]) ACM_PUSH_VAL([$0],[CPPFLAGS],[$USB_CPPFLAGS])dnl AC_CHECK_HEADERS([libusb-1.0/libusb.h libusb.h],[break]) AS_IF([test "$ac_cv_header_libusb_1_0_libusb_h" = "yes"],[ libusb_header="<libusb-1.0/libusb.h>" ],[test "$ac_cv_header_libusb_h" = "yes"],[ libusb_header="<libusb.h>" ]) AS_IF([test -n "$libusb_header"],[ AC_DEFINE_UNQUOTED([LIBUSB_HEADER],[$libusb_header],[libusb header location]) ]) ACM_POP_VAR([$0],[CPPFLAGS])dnl ACM_PUSH_VAL([$0],[LDFLAGS],[$USB_LDFLAGS])dnl ACM_PUSH_VAR([$0],[LIBS])dnl AC_SEARCH_LIBS([libusb_init], [usb-1.0 usb], [ AC_DEFINE([HAVE_LIBUSB],[1],[libusb is available]) AS_CASE([$ac_cv_search_libusb_init], [-l*],[[USB_LIBS=${ac_cv_search_libusb_init#-l}]]) dnl We need the double quoting in the case above or the # in dnl the prefix removal breaks the macro expansion horribly.
],[ AC_MSG_WARN([libusb not found]) case $host in *-*-openbsd* ) ACM_ADD_MISSING_DEP([libusb1-1.0]) ;; *-*-kfreebsd* ) ACM_ADD_MISSING_DEP([libusb2-dev]) ;; * ) ACM_ADD_MISSING_DEP([libusb-1.0-0-dev]) ;; esac ]) dnl We need to test for these explicitly, because LIBUSB_API_VERSION won't do. dnl It wasn't bumped when libusb_strerror was added, and it just doesn't exist dnl at all in the FreeBSD libusb3, which does provide libusb_get_port_numbers dnl even though it currently just returns LIBUSB_ERROR_NOT_SUPPORTED ... dnl And that, kids, is why we have autoconf. dnl As of FreeBSD 11, we also need to test for libusb_has_capability, since it dnl appears they added the hotplug support API and bumped the compatibility dnl version, but didn't actually add the capability test function ... AC_CHECK_FUNCS([libusb_strerror libusb_get_port_numbers libusb_has_capability]) ACM_POP_VAR([$0],[LIBS,LDFLAGS])dnl AC_SUBST([USB_LIBS]) AC_LANG_POP([C++]) ACM_CHECKPOINT_MISSING_DEPS dnl We defer failing out on this until here, because the most likely reason is dnl just that libusb itself is missing, which will be reported above, and that dnl is a more useful explanation to give if so. This should only trigger if dnl the lib is installed but the header is in some wacky place we didn't look. AS_IF([test -z "$libusb_header"],[ AC_MSG_ERROR([No libusb header file found]) ]) dnl We need this one in the udev rules to work around more systemd dumb-fuckery dnl that its cult members refuse to recognise the reality of yet. It seems a dnl dangerous value to hard-code, but libvirt essentially hard-codes it too ... dnl Hopefully the real problem will be addressed in a release or two's time and dnl then we can get rid of this workaround again. AC_ARG_VAR([LIBVIRT_SOCKET], [Path to the libvirtd unix control socket]) LIBVIRT_SOCKET="$SYSTEM_RUNDIR/libvirt/libvirt-sock" AC_MSG_NOTICE([Configured bit-babbler $PACKAGE_VERSION]) AC_MSG_NOTICE([ with udev: $mu_cv_with_udev]) AC_MSG_NOTICE([ SEEDD_CONTROL_SOCKET: $SEEDD_CONTROL_SOCKET]) AC_MSG_NOTICE([ LIBVIRT_SOCKET: $LIBVIRT_SOCKET]) AS_IF([test -n "$THREAD_STACK_SIZE"],[ AC_MSG_NOTICE([ THREAD_STACK_SIZE: $THREAD_STACK_SIZE]) ]) case $host in *-*-openbsd* ) AC_MSG_NOTICE([NOTE: On OpenBSD you will need to build this by using gmake,]) AC_MSG_NOTICE([ and you will need to have the bash package installed.]) ;; *-*-freebsd* ) AC_MSG_NOTICE([NOTE: On FreeBSD you will need to build this by using gmake,]) AC_MSG_NOTICE([ and you will need to have the bash package installed.]) ;; esac AC_CONFIG_FILES([munin/bit_babbler],[chmod +x munin/bit_babbler]) dnl ------- End: ./Makeup/config/configure.bit-babbler ----- AC_ARG_VAR([MAKEUP_PLATFORM_HEADER],[platform specific config header]) AC_ARG_VAR([MAKEUP_FLAVOUR_HEADER],[feature specific config header]) MAKEUP_PLATFORM_HEADER="${makeup_build_platform}_setup.h" MAKEUP_FLAVOUR_HEADER="$makeup_build_platform${makeup_build_flavour}_setup.h" CPPFLAGS="$CPPFLAGS -I\$(top_builddir)/include \$(EXTRACPPFLAGS)" CFLAGS="$CFLAGS \$(EXTRACFLAGS)" CXXFLAGS="$CXXFLAGS \$(EXTRACXXFLAGS)" AH_BOTTOM([#include ]) ACM_CONFIG_MAKEFILE([Makeup/gmake-fragments], [ makeup_version="0.38" package_name="bit-babbler" package_version="0.9" __package_config_dir="" __package_config_public="setup.h" ]) AC_CONFIG_HEADERS([include/private_setup.h:private_setup.h.in],[], [ if test !
-e $srcdir/private_setup.h.in; then touch $srcdir/private_setup.h.in; fi; ]) ACM_CONFIG_HEADER([setup.h]) AC_CONFIG_FILES([Makefile.acsubst.bit-babbler:Makeup/config/acsubst.bit-babbler]) AC_CONFIG_FILES([Makefile.acsubst.sysctl:Makeup/ac-fragments/acsubst.sysctl]) AC_CONFIG_FILES([Makefile.acsubst.systemd:Makeup/ac-fragments/acsubst.systemd]) AC_CONFIG_FILES([Makefile.acsubst.udev:Makeup/ac-fragments/acsubst.udev]) AC_CONFIG_FILES([60-bit-babbler.rules:Makeup/config/acfile.60-bit-babbler.rules]) AC_CONFIG_FILES([bit-babbler-sysctl.conf:Makeup/config/acfile.bit-babbler-sysctl.conf]) AC_CONFIG_FILES([seedd.service:Makeup/config/acfile.seedd.service]) AC_CONFIG_FILES([seedd-wait.service:Makeup/config/acfile.seedd-wait.service]) AC_OUTPUT bit-babbler-0.9/debian/0002755000000000000000000000000014136173163011646 5ustar bit-babbler-0.9/debian/bit-babbler.NEWS0000644000000000000000000000351614136173163014514 0ustar bit-babbler (0.8) unstable; urgency=medium The 0.8 release changes the way which seedd(1) is normally configured when started as a system daemon. Previously, configuration options could be set in /etc/default/seedd, but systemd doesn't support handling those in the way which we did so in the SysV init script. So now, when seedd is started from either the seedd.service systemd unit or the SysV init script, it will be configured using the options set in /etc/bit-babbler/seedd.conf instead. If you have customised the configuration in /etc/default/seedd then it will automatically be preserved during upgrade by generating an equivalent custom seedd.conf for you. The old configuration file content will be retained in /etc/default/seedd.dpkg-old, in case there are other things in there (like comments) that you do wish to keep a note of somewhere, but it can be safely removed when there is nothing in it which you still need. Nothing in this package will use anything from /etc/default after this. For reference, the packaged default seedd.conf will still be installed (in the same way as if you had selected "keep your currently-installed version" at the dpkg conffile prompt) as /etc/bit-babbler/seedd.conf.dpkg-new for you to inspect. You can safely modify the generated seedd.conf however you wish after this, as it will not be (re)generated again on future updates. Future updates will be handled with the normal dpkg mechanism for conffiles when there are any changes in the packaged and locally installed versions which need to be resolved. If you have not modified /etc/default/seedd, then it will simply be removed and the equivalent default seedd.conf will be installed, making this change (even more) completely transparent to you. -- Ron Lee Thu, 08 Feb 2018 10:26:52 +1030 bit-babbler-0.9/debian/bit-babbler.docs0000644000000000000000000000002514136173163014660 0ustar doc/virtual_machines bit-babbler-0.9/debian/bit-babbler.postinst0000644000000000000000000001071114136173163015616 0ustar #!/bin/sh set -e # summary of how this script can be called: # * `configure' # * `abort-upgrade' # * `abort-remove' `in-favour' # # * `abort-remove' # * `abort-deconfigure' `in-favour' # `removing' # # for details, see http://www.debian.org/doc/debian-policy/ or # the debian-policy package migrate_seedd_conf() { local oldconf='/etc/default/seedd' local newconf='/etc/bit-babbler/seedd.conf' local saveconf="${oldconf}.dpkg-old" # Remove the old config if it was unmodified rm -f "${oldconf}.dpkg-remove" # Otherwise, if it's still there and we still own it, let's convert it. 
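    # (Both of the guards below must pass: the old file must still exist, and
    # dpkg-query must list it as belonging to this package - the grep does a
    # fixed-string, whole-line match against the package's file list - otherwise
    # we leave it alone rather than converting something we no longer own.)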
[ -e "$oldconf" ] || return 0 dpkg-query -L bit-babbler | grep -F -q -x "$oldconf" || return 0 # Preserve the new config file that was shipped with the package, in the # same way as if the user had chosen "keep my local changes" at the dpkg # conffile update prompt. Except for this we keep them without prompting. [ ! -e "$newconf" ] || mv -f "$newconf" "${newconf}.dpkg-new" # Simulate how the old config was converted to command line options in the # init script, then generate a new format config file using those options. # Pretend we were invoked by systemd for that, since the new init script # will itself add the needed options which that would omit. ( SEEDD_ARGS="-k" . "$oldconf" [ -z "$CONTROL_GROUP" ] || SEEDD_ARGS="$SEEDD_ARGS --socket-group $CONTROL_GROUP" export NOTIFY_SOCKET=@dummy cat > "$newconf" <<-EOF # These options were automatically migrated from $oldconf # during upgrade to the bit-babbler 0.8 (or later) release. # # A copy of the old file has been preserved in $saveconf # which can safely be deleted now after confirming that there is nothing # remaining in it which you do wish to retain a copy of. # # This file can safely be edited to customise the seedd configuration, it will # not be automatically (re)generated again after the initial conversion of the # old configuration file has created it. EOF if /usr/bin/seedd --gen-conf $SEEDD_ARGS >> "$newconf" 2>/dev/null; then # Preserve a copy of their old config. We migrated anything which was # actively being used by the old init script, but there may be comments # or commented out configurations, or other things which it would be a # bit rude of us to just completely delete without asking. cat - "$oldconf" > "$saveconf" <<-EOF # NOTE: This file contains the last content from $oldconf # prior to it being converted into the new $newconf # # It may safely be deleted if there is nothing else in it which you wish # to retain. The seedd configuration uses options from the new file now # and there is nothing which still uses anything from the old one at all. EOF rm -f "$oldconf" echo "Automatically migrated $oldconf configuration to $newconf" echo "The previous configuration file was saved to $saveconf" fi ) } migrate_sysctl_conf() { local oldconf='/etc/sysctl.d/bit-babbler-sysctl.conf' local keepconf="${oldconf}.dpkg-backup" local killconf="${oldconf}.dpkg-remove" if [ -e "$keepconf" ]; then echo "Preserving modified $oldconf as a local override." echo "The package supplied configuration is now in /usr/lib/sysctl.d instead." mv -f "$keepconf" "$oldconf" elif [ -e "$killconf" ]; then echo "Moved unmodified $oldconf to /usr/lib/sysctl.d" rm -f "$killconf" fi } case "$1" in configure) addgroup --quiet --system bit-babbler # Version 0.8 adds /etc/bit-babbler/seedd.conf, replacing the previous # daemon configuration options which were set in /etc/default/seedd. dpkg --compare-versions -- "$2" ge-nl '0.8~' || migrate_seedd_conf # Version 0.9 moves /etc/sysctl.d/bit-babbler-sysctl.conf to the # /usr/lib/sysctl.d directory, but we need to preserve the one in # /etc exactly as it is, as an override, if it was modified. dpkg --compare-versions -- "$2" ge-nl '0.9~' || migrate_sysctl_conf sysctl -q -p /usr/lib/sysctl.d/bit-babbler-sysctl.conf || true ;; abort-upgrade|abort-remove|abort-deconfigure) ;; *) echo "postinst called with unknown argument \`$1'" >&2 exit 1 ;; esac #DEBHELPER# exit 0 # vi:sts=4:sw=4:noet bit-babbler-0.9/debian/bit-babbler.postrm0000644000000000000000000001010614136173163015255 0ustar #! 
/bin/sh set -e # summary of how this script can be called: # * `remove' # * `purge' # * `upgrade' # * `failed-upgrade' # * `abort-install' # * `abort-install' # * `abort-upgrade' # * `disappear' # for details, see http://www.debian.org/doc/debian-policy/ or # the debian-policy package revert_seedd_conf() { local oldconf='/etc/default/seedd' # Check that the old file hasn't already been removed, and that it hasn't # been usurped by some other package (which is unlikely, but would be Bad # for what we're going to do next). [ -e "${oldconf}.dpkg-remove" ] || return 0 dpkg-query -L bit-babbler | grep -F -q -x "$oldconf" || return 0 # Put things back to how they started before the upgrade. mv "${oldconf}.dpkg-remove" "$oldconf" } revert_sysctl_conf() { local oldconf='/etc/sysctl.d/bit-babbler-sysctl.conf' # Check that this package does still own the old file dpkg-query -L bit-babbler | grep -F -q -x "$oldconf" || return 0 # Put things back to how they started before the upgrade. # At most, only one of these should normally exist. for f in "${oldconf}.dpkg-remove" "${oldconf}.dpkg-backup"; do if [ -e "$f" ]; then echo "Restoring $oldconf from $f" mv "$f" "$oldconf" fi done } purge_etc() { # Remove any local admin additions which in the past would have been # conffiles that dpkg would have removed on purge. rm -f '/etc/sysctl.d/bit-babbler-sysctl.conf' \ '/etc/systemd/system/seedd.service' \ '/etc/systemd/system/seedd-wait.service' \ '/etc/udev/rules.d/60-bit-babbler.rules' # Remove any .wants, .requires, .service.d etc. directories for seedd itself. rm -rf /etc/systemd/system/seedd.service.* \ /etc/systemd/system/seedd-wait.service.* # Remove references to seedd in other service .wants or .requires directories. find /etc/systemd \( -name 'seedd.service' -o -name 'seedd-wait.service' \) -delete if [ -d /run/systemd/system ] ; then systemctl --system daemon-reload >/dev/null || true fi } case "$1" in abort-install|abort-upgrade) # Version 0.8 adds /etc/bit-babbler/seedd.conf, replacing the previous # daemon configuration options which were set in /etc/default/seedd. dpkg --compare-versions -- "$2" ge-nl '0.8~' || revert_seedd_conf # Version 0.9 moves /etc/sysctl.d/bit-babbler-sysctl.conf to the # /usr/lib/sysctl.d directory, but we need to preserve the one in # /etc exactly as it is, as an override, if it was modified. dpkg --compare-versions -- "$2" ge-nl '0.9~' || revert_sysctl_conf ;; remove|purge|upgrade|failed-upgrade|disappear) ;; *) echo "postrm called with unknown argument \`$1'" >&2 exit 1 ;; esac #DEBHELPER# if [ "$1" = "purge" ]; then # Remove any cruft that might be left behind if someone purges this # in the middle of a 'failed' conversion. rm -f '/etc/sysctl.d/bit-babbler-sysctl.conf.dpkg-backup' \ '/etc/sysctl.d/bit-babbler-sysctl.conf.dpkg-remove' # This is a bit controversial: on the one hand it would remove files which # ostensibly aren't owned by the package. But on the other, they are files # which historically would have been conffiles that were, and which would # have been cleaned up if the package was fully purged. The new fad to put # configuration into $libdir and generate unmanaged content in /etc breaks # all that and potentially leaves a mess there for the admin to tidy up, which # they can't just do by purging the package anymore.
# # For now, we won't do it, we'll just document what should be done if we do # and let's see if some other solution for this problem emerges, or if the # tide of consensus clearly shifts one way or the other about how to manage # this regression from the historical best practices developed by Debian. # # purge_etc fi exit 0 # vi:sts=4:sw=4:noet bit-babbler-0.9/debian/bit-babbler.preinst0000644000000000000000000000644214136173163015425 0ustar #! /bin/sh set -e # summary of how this script can be called: # * `install' # * `install' # * `upgrade' # * `abort-upgrade' # for details, see http://www.debian.org/doc/debian-policy/ or # the debian-policy package # This returns true (0) if the conffile belongs to this package, and a modified # version of it currently exists which needs to be preserved. If so, the file # will remain untouched. In all other cases it will return false (1). If an # unmodified version exists, it will be renamed by appending .dpkg-remove, so # that it can be restored if the package upgrade fails, or removed in postinst # if the upgrade completes successfully. check_conffile() { local f=$1 # Check that the file hasn't already been removed, and that it hasn't been # usurped by some other package (which is unlikely, but would be Very Bad for # what we are about to do next). [ -e "$f" ] || return 1 dpkg-query -L bit-babbler | grep -F -q -x "$f" || return 1 local sys_sum=$(md5sum "$f" | sed -e 's/ .*//') local pkg_sum=$(dpkg-query -W -f='${Conffiles}' bit-babbler | sed -n -e "\'^ $f ' { s/ obsolete$//; s/.* //; p }") # If it's unmodified, move it aside for deletion if the upgrade succeeds. if [ "$sys_sum" = "$pkg_sum" ]; then mv -f "$f" "${f}.dpkg-remove" return 1 fi # Otherwise, we'll migrate it to its new form in postinst. return 0 } check_seedd_conf() { # Just move it out of the way if it's unmodified. If it is modified, then # in postinst we'll generate a custom seedd.conf based upon its content, to # replace the one that is shipped in the new package. # # Because we don't remove the original conffile here before the new files # are unpacked, dpkg will remember it as an 'obsolete' conffile until the # next package upgrade (by which time it will really have been removed). check_conffile '/etc/default/seedd' || return 0 } check_sysctl_conf() { local oldconf='/etc/sysctl.d/bit-babbler-sysctl.conf' # If this one was modified, we also move it aside so that dpkg will forget # that it was a conffile, and not keep tracking it as 'obsolete' for purge. # This way a purge will do the same thing whether the modified file was # created before or after this upgrade (ie. leave it alone). We will move # it back again in the postinst to preserve the local admin's configuration # as an override of the packaged version that is now in /usr/lib/sysctl.d if check_conffile "$oldconf"; then mv -f "$oldconf" "${oldconf}.dpkg-backup" fi } case "$1" in install|upgrade) # Version 0.8 adds /etc/bit-babbler/seedd.conf, replacing the previous # daemon configuration options which were set in /etc/default/seedd. dpkg --compare-versions -- "$2" ge-nl '0.8~' || check_seedd_conf # Version 0.9 moves /etc/sysctl.d/bit-babbler-sysctl.conf to the # /usr/lib/sysctl.d directory, but we need to preserve the one in # /etc exactly as it is, as an override, if it was modified.
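        # Note that the -nl comparison variants treat an empty version (a fresh
        # install) as later than any version, so these migration checks are only
        # run when upgrading from a release older than the one they name.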
dpkg --compare-versions -- "$2" ge-nl '0.9~' || check_sysctl_conf ;; abort-upgrade) ;; *) echo "preinst called with unknown argument \`$1'" >&2 exit 1 ;; esac #DEBHELPER# exit 0 # vi:sts=4:sw=4:noet bit-babbler-0.9/debian/bit-babbler.seedd.init0000644000000000000000000000450414136173163015764 0ustar #!/bin/bash # ### BEGIN INIT INFO # Provides: seedd # Required-Start: $local_fs $syslog # Required-Stop: $local_fs $syslog # Default-Start: S # Default-Stop: 0 1 6 # Short-Description: BitBabbler entropy source daemon # Description: ### END INIT INFO # Note we deliberately don't create a pidfile for this. # We could, but it's not really worth the fuss if we have --exec matching. NAME=seedd DESC="BitBabbler entropy source daemon" DAEMON=/usr/bin/seedd SEEDD_CONFIG=/etc/bit-babbler/seedd.conf PATH=/sbin:/bin:/usr/sbin:/usr/bin [ -x $DAEMON ] || exit 0 . /lib/init/vars.sh . /lib/lsb/init-functions do_start() { # Return # 0 if daemon has been started # 1 if daemon was already running # 2 if daemon could not be started start-stop-daemon --start --quiet --exec $DAEMON --test > /dev/null \ || return 1 start-stop-daemon --start --quiet --exec $DAEMON -- -d -C $SEEDD_CONFIG \ || return 2 } do_stop() { # Return # 0 if daemon has been stopped # 1 if daemon was already stopped # 2 if daemon could not be stopped # other if a failure occurred start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --exec $DAEMON } do_reload() { # We don't need this, we have no config to reload, not yet anyway ... # start-stop-daemon --stop --signal 1 --quiet --exec $DAEMON return 0 } case "$1" in start) [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC " "$NAME" do_start case "$?" in 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; 2) [ "$VERBOSE" != no ] && log_end_msg 1; exit 1 ;; esac ;; stop) [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME" do_stop case "$?" in 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; 2) [ "$VERBOSE" != no ] && log_end_msg 1; exit 1 ;; esac ;; reload) log_daemon_msg "Reloading $DESC" "$NAME" do_reload log_end_msg $? ;; force-reload|restart) log_daemon_msg "Restarting $DESC" "$NAME" do_stop case "$?" in 0|1) do_start case "$?" in 0) log_end_msg 0 ;; 1) log_end_msg 1; exit 1 ;; # Old process is still running *) log_end_msg 1; exit 1 ;; # Failed to start esac ;; *) # Failed to stop log_end_msg 1 exit 1 ;; esac ;; status) status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $? ;; *) echo "Usage: $0 {start|stop|reload|force-reload|restart|status}" >&2 exit 3 ;; esac exit 0 bit-babbler-0.9/debian/changelog0000644000000000000000000006156314136173163013531 0ustar bit-babbler (0.9) unstable; urgency=medium * Refactor the udev rules for kernel 4.12 bind events and the mess udev made with handling introduction of a new event type. * Install udev rules and systemd units with make install, and fix the paths in them at configure time. There should be no reason for that needing to be Debian specific anymore. * Ignore -Wgnu-designator at the sites where we use them rather than just disabling it globally. It's good to mark Special Things as being special. * Drop the open coded set of 'hardening flags' from the Debian packaging, we test for and include those as part of the normal build system options. * Mark up exceptions for UBSan's not-actually-undefined tests so that we can apply them to catch any code where that behaviour might be unintentional. We do have deliberate use for Inf and NaN results in some statistics, and for 'implicit' modulo 2^32 in a uint32_t. This makes those more explicit. 
* Be explicit about more traditionally implicit type conversions too so we can usefully employ warnings about unintended/unanalysed conversions that could be narrowing or reduce precision. * Use RefuseManualStart/RefuseManualStop in seedd-wait.service. It is a(n optional) sequence point for seeding the kernel at boot time, so there is no reason or value in attempting to start or stop it after that. Stopping it could even be actively harmful, as that would also stop any unit that did Require it to complete before they started, even though what they actually Require (a freshly seeded kernel) has been satisfied and will not be changed. * Build-Depends: debhelper (>= 9), debhelper (>= 9.20160709) | dh-systemd We need this little abomination because dh-systemd has been merged with debhelper 9.20160709, but Jessie is still ELTS promised until 30/6/2022, and it only has debhelper 9.20150101+deb8u2 ... So when move-fast-and- break-shit dropped dh-systemd from buster, it broke shit for people who want packages to build on at least all supported releases without silly avoidable busywork ... Closes: #958581 * Enable bbvirt to log to syslog, which is useful to see what happens when it's called from udev rules and the like which don't already log output sent to stderr. * Try to work around systemd deadlocking the boot with libvirt from Bullseye when it starts a zombie .socket unit before the service unit dependencies are satisfied - causing anything which attempts to communicate with it to just hang until the systemd time out "boot ordering bug solver" kicks in. We can only make the race a bit harder to lose here, a real fix involves declaring proper ordering dependencies for the libvirt socket unit (and fixing the networking.service unit to not abandon trying to bring the network up if the ifupdown-pre.service times out). * Chop the NUL terminator off control socket messages before passing them to JSON::XS for decoding, the 4.0 release now considers them trailing garbage rather than just ignoring them like earlier releases did. -- Ron Lee Wed, 27 Oct 2021 17:19:23 +1030 bit-babbler (0.8) unstable; urgency=medium * Support hotplugging devices into libvirt guest domains which have names containing characters that are not valid as part of a shell variable name. Another reminder that the important part of keeping things as simple as possible is always the "as possible" bit. * Support reading seedd(1) options from a configuration file. The original design plan explicitly avoided this, partly just to keep the code as simple and easy to audit as possible, and partly because it was desirable to make invocation as simple and foolproof as possible. The more options that something has, the easier it is to make some mistake with running it which could have subtle and even serious consequences. But we are at the point now where there are enough real alternative options which are either genuinely desirable or needed for some use case, that the balance becomes weighted toward being able to keep persistent configuration settings in a file rather than having to spell them out on the command line each time. The final straw for making this change now was the inability of systemd to sanely support the existing simplified configuration interface that was provided in /etc/default/seedd for the SysV init script. 
When given the alternative choices available to us of either adding a shell wrapper to do what systemd could not, or forcing people to manually edit or override the systemd unit directly to make any configuration change, this was clearly the Lesser Evil to embrace if we were going to provide a native systemd unit for the system daemon. The former gains us nothing over the existing LSB init script, and the latter would require every user to first have a solid grasp of all the non-obvious consequences which can come into play when configuring a system which (according to systemd.directives(7)) "contains 2464 entries in 13 sections, referring to 241 individual manual pages" - and where even package maintainers and systemd upstream still make mistakes that can take a long time for the real consequences to be noticed. So if we were to provide a systemd unit, it needs to be well tested and give people few, if any, reasons to ever need to modify it. * Preserve existing configuration on package upgrades. The new default configuration file behaves the same way as the old defaults did. If the settings in /etc/default/seedd have been customised, then on upgrade we generate a custom /etc/bit-babbler/seedd.conf implementing the same set of options. The old customised file content will be retained, and can be found in /etc/default/seedd.dpkg-old, in case there was anything else in it which people might also want to keep, but after checking for that it can safely be removed by the system admin. Nothing from this package uses files in /etc/default from this version onward. * Two systemd unit files are now included in this package, but only one is enabled by default. The seedd.service unit provides the same functionality as the SysV init script does, and will be used instead of it on systems where systemd is running as the init process. It will start the seedd(1) daemon as soon as possible during boot, reading its options from the new configuration file, and if feeding entropy to the kernel it will begin doing so as soon as the available USB devices are announced to the system by udev. The seedd-wait.service oneshot unit is not enabled by default. It provides a simple sequence point which may be used to ensure that QA checked seed entropy from available BitBabbler devices can be mixed into the kernel's pool before other ordinary services which might rely upon it are started. This is its default behaviour if it is simply enabled, and ordinarily it will not delay the boot for very long, only until udev announces a device that we can read some good seed bits from. By default this will time out after 30 seconds if good entropy cannot be obtained, which should be more than enough time to get a good seed if that was going to be possible, but won't completely cripple the system when it is acceptable for it to still be running without having a working BitBabbler attached. Additionally, the seedd-wait.service can also be used to place a harder constraint on individual services, if there are particular things which the local admin does not want started at all if good seed entropy was not obtained. Or it can be configured to divert the boot to a degraded mode (such as the single-user mode emergency.target) if the availability of good entropy from a BitBabbler should be a hard requirement for the whole system. For more details of its use see the BOOT SEQUENCING section of the seedd(1) manual page. 
-- Ron Lee Thu, 08 Feb 2018 10:26:52 +1030 bit-babbler (0.7) unstable; urgency=medium * Handle the oddball case of a RHEL/CentOS 6 kernel being used with libusb version 1.0.13 or later. They backported the USBDEVFS_GET_CAPABILITIES API, but not the patch which was applied to the mainline kernel at the same time to add USBDEVFS_CAP_BULK_SCATTER_GATHER. With the default libusb 1.0.9 that it normally ships with this doesn't matter because none of it is supported there anyway, but updating it makes this become a real problem that we need to deal with. * Test for SIGRTMIN, MacOSX doesn't have it. * Test for pthread_setname_np by signature, it's implemented differently on different platforms. Also support pthread_set_name_np which is what is used by OpenBSD and FreeBSD. * Explicitly set the pthread stack size on (more) platforms where it is tiny by default. We do create some large structures on the stack, and it's probably better to have a consistent size on all platforms than have some of them smash it "unexpectedly". * Querying the string length with strftime is a GNU extension, so only use that where it's actually available. * Provide an implementation of FeedKernelEntropy for MacOS. It does have a documented interface for that, even if the implementation of it that is in its kernel source as of Sierra is ... let's go with enlightening. * Rename _P(), our convenience alias to ngettext, to P_(). On OpenBSD 6.1 _P is defined in ctype.h, and regardless of what you might think of that, symbols starting with an underscore are reserved. So they win this one. * Test for LOG_MAKEPRI, it isn't required by POSIX, and OpenBSD 6.1 doesn't provide it. * Add explicit tests for strtod_l and newlocale. The newlocale function is specified by POSIX, but OpenBSD doesn't provide it. We can get away with falling back to strtod there because it only has very limited support for locales anyway, and right now it will always be either C or en_US.UTF8, both of which use '.' as the decimal point. * Support systems without abi::__forced_unwind for thread cancellation stack unwinding and clean up. The GCC toolchain on OpenBSD doesn't support it, and neither does clang on FreeBSD or MacOS. * Disable thread cancellation around calls to vfprintf on OpenBSD. On that platform it is a cancellation point (as expected), but if cancellation does occur there on 6.1, it can leave its internal _thread_flockfile mutex locked which means any future calls to vfprintf (or anything else needing that lock) will deadlock. We can't easily test for that bug, so we just always provide our own safe cancellation point instead on that platform. * Support unordered_map in both std and std::tr1 namespaces. Normally it isn't available in std:: unless C++11 support is enabled, but clang on FreeBSD provides the tr1 header as a symlink to the std one, and only provides the template in the std namespace. * Test for libusb_has_capability. FreeBSD 11 bumped LIBUSB_API_VERSION to 0x01000102, but didn't add the libusb_has_capability() function which was part of that interface version. It did however add the hotplug API, and it does mostly work, so if the API version is sufficient, but we can't test for this capability, then we'll assume it's available and that it will handle a call to it gracefully if for some reason it really isn't. * Deal with FreeBSD 11 libusb start up and shut down delays. 
In our testing there was a delay of around 4 seconds before it would report any existing devices when LIBUSB_HOTPLUG_ENUMERATE was used with the hotplug callback (and a similar delay when new devices really were hotplugged later). That makes things awkward for the --scan option, which would see no devices at all before returning its results, unless it too had an arbitrary (and user unfriendly long) delay before responding. So we explicitly enumerate the initial set ourselves on FreeBSD now even when hotplug support is enabled, and handle any duplicates when the hotplug events finally do arrive. Likewise, when libusb_exit is called, it also blocks for around 4 seconds before returning - which delays our code from being able to do a clean exit quickly. There's not much we can do about that one except add some extra debug logging when verbosity is turned up, so that users can see it is not actually our code that has them twiddling their thumbs waiting. Hopefully later releases of FreeBSD will improve on this. * Work around FreeBSD 11 deadlocking on device unplug. If a device is ever removed while we are in the middle of a call to libusb_bulk_transfer(), then that call may deadlock and never return, and the thread which called it will not be able to be cancelled. We can limit the impact that has on our code (since we will already always be cancelling that thread once we get the hotplug notification of the removal) by using pthread_timedjoin_np and if the join times out, bark about it and just ignore the zombie thread that we've been left with. It's not ideal, and if it happens often enough in a single process run, then leaked resources will start to be exhausted. But most people don't replug things all that frequently, this won't happen every time they do, and it is about the best we can do until the FreeBSD bug is fixed. There's no downside to the workaround if the bug doesn't actually occur when a device is unplugged. * Disable some buggy optimisations (normally enabled by -O2) when using GCC on FreeBSD 11. On that platform (we've not seen this anywhere else), they appear to miscompile some code in a way that stack unwinding details are lost and a thrown exception will invoke terminate rather than being caught by the handler it should have unwound to. Some of these were in obscure corners of the code that are only seen when unlikely errors occur, so it's not impossible that there may be a few more lurking. But for now, we'll just disable the known problem ones rather than falling all the way back to compiling with -O0 on that platform. * Add a --limit-max-xfer option to seedd and bbcheck. This gives people a runtime workaround for systems where, despite having an ostensibly new enough Linux kernel and libusb version to support large bulk URBs, the hardware chipset has some quirk that isn't yet fixed in the kernel driver which makes using them troublesome (I'm looking at you RPI3). If passed, this will force the old 16kB limit on individual requests to usbfs. That doesn't have any effect on the size of requests that users can make from our code, it just might be slightly slower at obtaining huge numbers of bits, since we need to get them in smaller chunks internally. * Add an example of how to obtain random integers within an arbitrary range where every value in that range remains equally probable. 
This isn't a difficult thing to do, but enough people have asked to provide a working example, and there's enough history elsewhere of people doing something naive, like using a modulus to limit the range (thus making some values more probable than others), to have a good example easily available that users can refer to. * Search for where udevadm is found in the qemu-hook script. Its installed path was changed by systemd 204 in Jessie from /sbin to /bin, so now we need to deal with multiple interfaces to stay portable, since there are supported distro releases that aren't EOL yet which still have it in the old location. Jessie's systemd had a compat link to make it available in both places, but the systemd maintainers want to drop that for Buster (and not all other distros with newer systemd provide that). Closes: #852582 * Tweak the udev rules to work around a bug in udev versions up to at least 232-25 which makes testing ATTR keys with != become a perilous folly. If an event occurs for a device which does not have that attribute at all, then the rule will be skipped, the same as if it was actually equal to the value being tested for. -- Ron Lee Mon, 19 Jun 2017 13:31:17 +0930 bit-babbler (0.6) unstable; urgency=medium * Update the libvirt Suggests to include libvirt-clients, since libvirt-bin got split into a bunch of pieces in Jessie (because GNOME, see #679074), and -clients is where virsh has moved to now. * Add configure tests for the functions udev_device_get_tags_list_entry and udev_device_get_sysattr_list_entry which were added in udev 154 and 167 respectively. The RHEL/CentOS 6 release appears to ship with udev 147, so we can't use them there, and it won't be fully EOL for some time yet. That's not a big deal, we only use them to output extra device information when the debug level is cranked up, so we can just omit that code on any systems where they aren't supported. * Include xlocale.h explicitly on platforms where we need that for strtod_l. * Initialise struct addrinfo more portably. There's little point to trying to be clever with static initialisers while g++ has limited support for them in C++ code, just do it in a way that will work everywhere. * Add an explicit guard rather than a platform check around the code to append a wstring as UTF-8 and disable it by default for all platforms now. We don't actually need or use that anywhere here right now, and there are more platforms than MSW where wchar_t is unspecified and/or insane. So we'll worry about enabling it again if we ever do need it here, since we do want this to be portable to those still. * Automatically add -D_REENTRANT for compilers that don't do that themselves when -pthread is used. * Preserve the Chi^2 statistic when long term results need to be normalised to prevent wrap around, and improve the precision on scaling the other metrics to minimise discontinuities, especially on machines with a 32-bit size_t where the normalisation is more likely to be needed in practice. Many thanks to George Tsegas for his extensive testing, and careful and critical attention to questioning all of the results of that. * Fix the framing/status check to handle devices plugged into USB 1.0 ports. Apparently there are still a few of those left in the wild. -- Ron Lee Fri, 17 Jun 2016 23:36:55 +0930 bit-babbler (0.5) unstable; urgency=medium * Add more options to optimise for minimal power consumption. 
The defaults before now were mostly focussed on keeping a good supply of fresh entropy being regularly mixed into the kernel pool, and on minimising the risk of starvation delays when demand is high. But there's an equally important group of users who not only want good entropy, but also want to minimise idle power usage as much as possible. So we now have some extra tunables to better support that too. The rate at which new entropy is mixed into the kernel pool even when it has not fallen below its low water mark is now directly configurable, as is the rate at which we throttle down requesting more entropy from the hardware when real demand for it falls. Tuning these can minimise how often we are responsible for waking the CPU on an otherwise idle system. It is also now possible to configure the devices to be released when we expect to be idle beyond a given period of time, which will allow them to be powered down and suspended, and only woken again when we do need more entropy from them. There are new udev rules which automatically enable the USB autosuspend feature of the Linux kernel for them when they are plugged in, which means this will work without needing to manually set all that up (unless you want to further tweak the parameters there too). * Don't create the control socket by default when only a limited number of output --bytes are requested. It can still be enabled explicitly if you do want it available while they are being read, but that's normally of fairly limited use, and it's otherwise just annoying to have to remember to explicitly disable it when extracting a block of entropy in this way, and confusing to users if it complains they don't have permission to (re)create it in the default location. * Defer device initialisation until the pool threads have been started. Most users won't really notice any difference from that, but when you have 100 devices in a machine together then even small delays quickly add up to become a thumb twiddling pause if they are serialised rather than being run in parallel. * Better support for pass-through to libvirt managed virtual machines when there is more than one BitBabbler device in the host. This is still more painful than it really ought to be, but we now have a big enough hammer pounding on enough of the rough edges in libvirt support for things to work like USB devices should be expected to work. They can be hotplugged dynamically without admin intervention to the guest machines you want them assigned to, and assigned to guest machines without fragile hacks based on which USB port they are plugged into. -- Ron Lee Wed, 23 Dec 2015 00:38:47 +1030 bit-babbler (0.4) unstable; urgency=medium * Switch to using libusb-1.0 now. It turns out that libusb-0.1 doesn't actually work on kFreeBSD, it only builds there ... which isn't very helpful. The kFreeBSD port actually uses FreeBSD's own libusb which also provides a compatibility API for libusb-1.0 - and we need to jump through a few small extra hoops to use it, but it has the advantage of actually working, which is a plus. This also means we immediately get much better support and lots of bugfixes for non-Debian platforms too, so this should work everywhere that current releases of libusb do now. * Drop libftdi. This is partly a consequence of the above, since a version of it built with libusb-1.0 isn't widely available, and partly a result of realising we weren't really using anything from it that we couldn't just do more easily and more directly though libusb ourselves anyway. 
Our code ended up being significantly refactored and simplified as a result of this and it opened the way for a number of additional easy improvements too. * Drop the --device-num option for selecting devices. Having an arbitrary enumeration isn't really all that useful in hotplug environments, and the --device-id option now transparently supports selecting devices by their serial number, or by either their logical or physical address on the bus, so the duplication there was only becoming a source of confusion. * More speed and efficiency tuning. As a result of now having more direct control over the device we've been able to notably reduce some of the overheads of streaming data out of it, which means we're now using less CPU cycles with an increase in throughput for the same device clock rate. * Make the libudev build dependency conditional on linux-any so the kFreeBSD buildds will actually want to build it. We can't do much for Hurd until someone actually ports libusb-1.0 to it. * Make the use of signals which may not exist on all platforms conditional, which should enable this to build on MIPS, Sparc, and Alpha too. -- Ron Lee Sat, 05 Dec 2015 04:40:11 +1030 bit-babbler (0.3) unstable; urgency=medium * Include a simple example script for reading from the UDP socket. * Include the documentation for configuring virtual machine pass-through in the binary package as well. * Document how to deal with cgroups mandatory access control when using devices inside libvirt managed virtual machines. * Initial upload to Debian, Closes: #805979 -- Ron Lee Tue, 24 Nov 2015 23:15:24 +1030 bit-babbler (0.2) unstable; urgency=low * Add the option to read entropy directly from a UDP socket too. * Permit TCP to be used for the control socket. Not all platforms have unix domain sockets, and some people might actually want to be able to access it remotely anyway. * Guard the Linux specific code (for feeding the kernel entropy) to only build on Linux, and get it to build with the mingw-w64 cross compiler. * Add a system group bit-babbler, and a udev rule which grants permission to access the device(s) to people in that group. This is mostly useful for people who want to stream bits directly out of the devices and don't need or want the privilege required to be able to write entropy directly to the kernel pool. -- Ron Lee Sat, 27 Jun 2015 01:17:17 +0930 bit-babbler (0.1) unstable; urgency=low * Initial release -- Ron Lee Tue, 24 Feb 2015 09:23:18 +1030 bit-babbler-0.9/debian/compat0000644000000000000000000000000214136173163013042 0ustar 9 bit-babbler-0.9/debian/control0000644000000000000000000000311414136173163013246 0ustar Source: bit-babbler Section: admin Priority: optional Maintainer: Ron Lee Build-Depends: debhelper (>= 9), debhelper (>= 9.20160709) | dh-systemd, libusb-1.0-0-dev, libudev-dev [linux-any], pkg-config Standards-Version: 4.6.0.1 Homepage: http://www.bitbabbler.org Package: bit-babbler Architecture: any Depends: ${shlibs:Depends} Suggests: munin-node, libjson-xs-perl, libvirt-clients | libvirt-bin, acl Description: BitBabbler hardware TRNG and kernel entropy source support This package provides supporting software for the BitBabbler true random number generator hardware. It includes: . - The seedd daemon for running continuous quality and health checks on the active devices and the stream of entropy that they provide, and for seeding the OS kernel entropy pool on demand. 
- Monitoring tools for short and long term analysis, and alerting of anomalies in both the BitBabbler output and the kernel /dev/random and /dev/urandom output. - Admin tools for real-time inspection of the short and long term quality metrics. . You will need to install libjson-xs-perl if you wish to use the munin-node script for continuous graphing and monitoring of device performance. . You will need to install libvirt-clients if you want hotplug support for adding devices to libvirt managed virtual machines. Package: bit-babbler-dbg Section: debug Architecture: any Depends: bit-babbler (= ${binary:Version}) Description: debugging symbols for BitBabbler tools This package provides the detached debug symbols for software in the bit-babbler package. bit-babbler-0.9/debian/copyright0000644000000000000000000000127014136173163013577 0ustar The bit-babbler package is: Copyright (C) 2003 - 2018, Ron Lee It is distributed according to the terms of the GNU GPL v2. On Debian systems, the complete text of the GNU GPL can be found in /usr/share/common-licenses/GPL-2 The implementation of poz() and pochisq() in include/bit-babbler/chisq.h are (heavily) based on public domain code from John "Random" Walker's ENT test suite, which in turn took it with minimal modification from public domain code by Gary Perlman of the Wang Institute. The original code for ENT can be found at The changes to poz() and pochisq() made in this package are released into the public domain as well. bit-babbler-0.9/debian/rules0000755000000000000000000000436414136173163012733 0ustar #!/usr/bin/make -f # # Copyright 2003 - 2018 Ron Lee. SHELL = /bin/bash export DH_OPTIONS NUM_CPUS = $(shell getconf _NPROCESSORS_ONLN 2>/dev/null) PARALLEL = $(subst parallel=,,$(filter parallel=%,$(DEB_BUILD_OPTIONS))) NJOBS = -j$(or $(PARALLEL),$(NUM_CPUS),1) DEB_BUILD_GNU_TYPE ?= $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE) DEB_HOST_GNU_TYPE ?= $(shell dpkg-architecture -qDEB_HOST_GNU_TYPE) build_arch_stamps = build-shared-stamp install_arch_stamps = install-stamp objdir_shared = objs_sh all_stamps = $(build_arch_stamps) $(install_arch_stamps) config_cache = ../config_deb.cache clean: dh_testdir $(RM) -r $(objdir_shared) $(RM) $(all_stamps) dh_clean $(objdir_shared)/config.status: configure dh_testdir mkdir $(objdir_shared) cd $(objdir_shared) && ../configure --host=$(DEB_HOST_GNU_TYPE) \ --build=$(DEB_BUILD_GNU_TYPE) \ --cache-file=$(config_cache) \ --prefix=/usr build: build-arch build-indep build-arch: $(build_arch_stamps) build-indep: build-shared-stamp: $(objdir_shared)/config.status dh_testdir $(MAKE) $(NJOBS) -C $(objdir_shared) touch $@ install: install-arch install-indep install-arch: build-arch $(install_arch_stamps) install-indep: install-stamp: DH_OPTIONS = -pbit-babbler install-stamp: build-shared-stamp dh_testdir dh_testroot dh_prep $(MAKE) -C $(objdir_shared) install DESTDIR=$(CURDIR)/debian/bit-babbler dh_systemd_enable seedd.service dh_systemd_enable --no-enable seedd-wait.service dh_installinit --restart-after-upgrade --name seedd dh_systemd_start --restart-after-upgrade seedd.service dh_systemd_start --no-start seedd-wait.service dh_installexamples doc/examples/* libvirt/qemu-hook touch $@ binary: binary-arch binary-common: dh_testdir dh_testroot dh_installchangelogs dh_installdocs dh_strip --dbg-package=bit-babbler-dbg dh_compress dh_fixperms dh_installdeb dh_shlibdeps dh_gencontrol dh_md5sums dh_builddeb binary-arch: build-arch install-arch $(MAKE) -f debian/rules DH_OPTIONS="-a" binary-common binary-indep: build-indep 
install-indep $(MAKE) -f debian/rules DH_OPTIONS="-i" binary-common .PHONY: clean build build-arch build-indep \ binary binary-common binary-arch binary-indep \ install install-arch install-indep bit-babbler-0.9/doc/0002755000000000000000000000000014136173163011171 5ustar bit-babbler-0.9/doc/README.BSD0000644000000000000000000000622314136173163012461 0ustar On OpenBSD, FreeBSD, and MacOS, where clang is now the default compiler, the configure script for the bit-babbler source will prefer to use it (as opposed to the normal autoconf default of always preferring GCC). You can still explicitly select the compiler to use if you wish to, using the normal autoconf idiom, something like: $ ./configure CC=gcc CXX=g++ In either case, whichever toolchain you use, you will still need to build this with gmake, and you will need to have the bash package installed. That said, on OpenBSD, using the default GCC 4.2.1 version is not highly recommended. It is too old to support using #pragma GCC diagnostic inside of functions, and though you could safely remove those statements, or move them outside the function definitions without changing the behaviour of the compiled code - with the extended diagnostic warnings that we build with by default, even the system headers for that toolchain have problems that the compiler will complain about. Given the reasons for not updating that to a later version of GCC, and the change of focus to make clang now the default toolchain for OpenBSD, it seems wrong for us to try and hide those problems with it, and better to just let them be a clear warning against using that version of GCC on OpenBSD anymore. All the attention of OpenBSD developers is on clang now, and so we should follow that there too. For FreeBSD, things were initially a little more ambiguous: it does provide more recent versions of GCC, and its clang toolchain does not support the abi::__forced_unwind exceptions which do proper stack unwinding for threads when they are cancelled, so for FreeBSD 10 we were still defaulting to and recommending people use GCC there - but in the FreeBSD 11 release the GCC toolchain there is also starting to show some signs of neglect. There are problems with its optimiser miscompiling code, which the configure script has some workarounds for to selectively disable known-broken optimisations on FreeBSD, and the 'relro' hardening (which makes portions of the process memory space read-only after relocation) was also broken (which we don't automatically work around, so if you want to use an affected GCC version, and you'll know it's affected if the executables simply always segfault at startup, then you'll need to explicitly pass --disable-relro to configure. See https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=219035#c4 for more details on that). So on balance, it seems the sane thing for FreeBSD now is also to recommend people prefer the clang toolchain, unless they have their own good reasons to choose otherwise. As of FreeBSD 11, hotplugging of USB devices is supported, but the OS level support for unplugging them isn't yet perfect. If a device is removed while it is being actively accessed, then the thread which was performing that access may deadlock inside an OS function call and be leaked.
If that happens it won't otherwise interrupt normal operation of our code (and the device can be plugged back in again successfully), but it is something to be aware of if you do plan to remove and replace devices frequently without restarting the application process that is controlling them. bit-babbler-0.9/doc/README.MS-Windows0000644000000000000000000001356314136173163014025 0ustar The bit-babbler source can be built for Windows systems using the mingw-w64 toolchain, which provides implementations for some of the POSIX features that we still use at present, even when we are building for that platform. --------------------------------------------------------------------------- If you trust us to supply pre-built binaries, we make those available for 64-bit Windows: http://www.bitbabbler.org/downloads/msw/bit-babbler-0.9_win64.zip http://www.bitbabbler.org/downloads/msw/bit-babbler-0.9_win64.zip.asc and for 32-bit Windows: http://www.bitbabbler.org/downloads/msw/bit-babbler-0.9_win32.zip http://www.bitbabbler.org/downloads/msw/bit-babbler-0.9_win32.zip.asc Both of those should verify as being signed by the same key that this source package was signed with: pub 4096R/125831AE66E70556 2015-11-23 Key fingerprint = 8EAF 7354 2433 9DDD FE83 5628 1258 31AE 66E7 0556 uid BitBabbler Sales uid BitBabbler Support If you use those, you will still need to follow the instructions below for "Installing it on the Windows host system", to install the WinUSB driver and msvcr110 if your system doesn't already have it, but you can skip the next few sections on building it from source. --------------------------------------------------------------------------- The following describes cross-compiling it on a Debian Stretch system with mingw-w64 4.9.2, but the same instructions should be broadly applicable elsewhere too. To build it natively on a Windows system you will probably also need msys or similar support installed too, for support needed by autotools build systems. First we need a Windows build of libusb-1.0 to be available. Suitable source for that can be fetched from: http://www.bitbabbler.org/downloads/msw/libusb-1.0.20.tar.bz2 Which is an unpatched copy of the upstream 1.0.20 release that has been tested to work - or you can grab whatever the latest release is from the libusb upstream site at: http://libusb.info Which should also work unless they break API or similar. --------------------------------------------------------------------------- Build libusb-1.0 and create an 'installed' copy of it. This does not need to be installed to the system dirs, we can just create a local tree to use: $ cd libusb-1.0.20 $ mkdir build-dir && cd build-dir $ ../configure --host x86_64-w64-mingw32 $ make $ mkdir install-dir $ make install DESTDIR=$(pwd)/install-dir This will install the files needed to build the bit-babbler source in the directory libusb-1.0.20/build-dir/install-dir/usr/local If you need to build this for a different target or toolchain, you can use a different --host, and if needed set CC and CXX to the compiler binaries. For example, to build for a 32-bit Windows system, instead of a 64-bit one as above, use something like: $ ../configure --host i686-w64-mingw32 You can also install it to the system library locations if you wish, but that probably only makes sense if you'll also be using it for other things as well. 
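For instance, a complete 32-bit build tree could be prepared in much the same
way as the 64-bit one shown above (the build directory name here is arbitrary):

 $ cd libusb-1.0.20
 $ mkdir build-dir-32 && cd build-dir-32
 $ ../configure --host i686-w64-mingw32
 $ make
 $ mkdir install-dir
 $ make install DESTDIR=$(pwd)/install-dir

The LIBUSB_DIR passed to the bit-babbler configure in the next section would
then just point at this tree's install-dir/usr/local directory instead.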
--------------------------------------------------------------------------- Now you can build bit-babbler: $ cd bit-babbler $ mkdir build-dir $ cd build-dir $ ../configure --disable-shared \ --host x86_64-w64-mingw32 \ LIBUSB_DIR=/home/you/libusb-1.0.20/build-dir/install-dir/usr/local $ make Where '/home/you' is the path to where you unpacked the libusb source. You should now have the needed .exe files that can be transferred to the windows machine for use. The --disable-shared option is optional, but means you won't need to copy the libusb-1.0.dll to the host machine as well, just the .exe files. --------------------------------------------------------------------------- Installing it on the Windows host system: The seedd.exe is the primary application used to obtain random bits from the BitBabbler device(s) in the system, confirm their quality, and make them available to other users. The bbctl.exe can be used to query status information from seedd.exe when it is running. You will also need the msvcr110 runtime library installed. It is no longer included by default in Windows 10 (but it should be already installed in the earlier versions). It can be obtained from here: http://www.microsoft.com/en-us/download/details.aspx?id=30679 You will also need the WinUSB driver installed, and you can either follow the instructions for doing that here: https://github.com/libusb/libusb/wiki/Windows#Driver_Installation Or use the tool provided by: http://zadig.akeo.ie/ Which is what the first link recommends. Once that is all done, you should be able to run 'seedd.exe -s' and it will report the connected BitBabbler device(s) that are found. You can obtain random bits by running either of: seedd.exe --stdout seedd.exe --udp-out localhost:1200 In the first case it will simply stream bits to stdout. In the second a UDP socket will listen on port 1200 of localhost, which can be queried for bits on demand. You can specify any address and port which you please for that. To obtain bits from the UDP socket, you write the desired number of bytes to it as a two-octet network-order short integer. It will then reply with a datagram containing the requested number of bytes of entropy. Requests for 1 to 32768 bytes will be honored as soon as there is sufficient entropy in the internal pool to do so. Requests outside of that range are invalid and will simply be ignored. Note that no access control is placed on this socket, so if it uses a publicly accessible address anyone will be able to read entropy from it (and potentially to use it as a traffic amplifier if requests use a forged source address). There is an example perl script which demonstrates reading from this in the bit-babbler source package, see doc/examples/bbudp.pl --------------------------------------------------------------------------- bit-babbler-0.9/doc/config/0002755000000000000000000000000014136173163012436 5ustar bit-babbler-0.9/doc/config/seedd.conf0000644000000000000000000002045714136173163014377 0ustar # Example configuration file for seedd. # Application-wide options are defined in this section. All options set # here do the same thing as the command line options with the same name. [Service] # Fork to the background and run as a daemon (--daemon). # You should not need (or want) to set this here for a process which is # expected to be managed by systemd or a SysV init script, you should let # their configuration control how it is to be placed into the background. 
# But you might use this if you want to manually background a separate # process run directly from the command line. #daemon # Feed entropy to the OS kernel (--kernel). kernel # When listening on an IP socket, don't require the needed network interface # to already be up. This allows seedd to be started early, without waiting # for network configuration to occur even when it should listen on a specific # address. If this option is not enabled, then it is a fatal error for the # udp-out service or a TCP control-socket to be bound to an address which is # not already configured when seedd is started. ip-freebind # Provide a UDP socket for entropy output on port 12345 of 127.0.0.1. #udp-out 127.0.0.1:12345 # Where to create the service control socket (used by bbctl and munin etc.). # May be set to 'none' to not create a control socket at all. If this option # is used to change the default control-socket address, then you will also # need to explicitly specify the new address to other tools accessing it too. #control-socket /run/bit-babbler/seedd.socket # Give users in this system group permission to access the control socket. socket-group adm # Request more or less information to be logged about what is going on. # This may be changed on the fly at runtime with `bbctl --log-verbosity` # if the control socket is available. #verbose 3 # Options to configure the entropy collection pool. # You normally shouldn't need to change or set anything here unless you have # very special requirements and know exactly what you are doing and why. #[Pool] # The size of the internal entropy pool in bytes (--pool-size). #size 64k # The device node used to feed fresh entropy to the OS kernel. #kernel-device /dev/random # The maximum time in seconds before fresh entropy will be added to the OS # kernel, even when it hasn't drained below its usual refill threshold. #kernel-refill 60 # Define an entropy collecting group and the size of its pool (--group-size). # The group_number is the integer given after the PoolGroup: string, and is the # value used for the Device 'group' option to assign a device to that group. # Pool groups can be used when multiple BitBabbler devices are available to # optimise for throughput or redundancy. If you only have a single device, # you probably don't need to define any groups explicitly. By default all # devices are placed into group 0 unless otherwise configured. # Any number of pool groups that are needed may be defined. #[PoolGroup:0] # size 64k #[PoolGroup:1] # size 64k # This section configures the defaults to use for all BitBabbler devices which # don't override them in a per-device section (or on the command line). # All options set here do the same thing as the command line options with the # same name when passed before any --device-id option. #[Devices] # The rate in bits per second at which to clock raw bits out of the device. #bitrate 2.5M # Override the calculated value for the USB latency timer. #latency 5 # Set the number of times to fold the BitBabbler output before adding it to # the pool. The default for this depends on the device type. White devices # default to folding just once, Black devices with only a single generator # will fold 3 times to emulate the four generators on the White devices. #fold 3 # The entropy PoolGroup to add the device to. #group 0 # Select a subset of the generators on BitBabbler devices with multiple # entropy sources. 
The argument is a bitmask packed from the LSB, with each # bit position controlling an individual source, enabling it when set to 1. # There is usually no good reason to mask generators in normal use, the main # use case is to verify the output of each generator separately when testing. #enable-mask 0x0f # Configure how devices back off from generating entropy at the maximum rate # when it is not actually being consumed by anything. When the pool first # becomes full, we will pause for the 'initial' number of milliseconds, # doubling that delay each time we wake with the pool still full up to the # maximum value. As a special case, if the max value is 0 then further reads # from the device will be suspended indefinitely once the delay reaches 512ms # until the pool is no longer full. #idle-sleep 100:60000 # The threshold in milliseconds where if we expect the device to be idle for # longer than that, we will release our claim on it, allowing the OS to put # it into a low power mode while it is not being used. A value of 0 means # we will never release the claim unless seedd is halted or it is unplugged. #suspend-after 0 # Enable options for better power saving on lightly loaded systems. # This is equivalent to using: # --kernel-refill=3600 --idle-sleep=100:0 --suspend-after=10000 # Which should be a reasonable balance of allowing the system to suspend as # much as possible when idle while keeping sufficient fresh entropy on hand # for when it is needed. #low-power # Limit the maximum transfer chunk size to 16kB. This is a workaround for # buggy USB chipsets (and their drivers) which still do exist on some # motherboards and have trouble when larger transfers are used. The impact # on transfer speed of this is relatively minimal, but you normally wouldn't # want to enable this unless you actually see real problems without it. #limit-max-xfer # Disable gating entropy output on the result of quality and health checking. # You almost never want to use this option at all, and even less so in a # configuration file for a system daemon. The main reason this option exists # at all is for generating streams which are to be analysed for quality by # some external test suite - in which case we definitely don't want to be # filtering any bad blocks of bits from what it would see. But you'd never # want to do this for any 'normal' use where good entropy is always assumed. # Because of that, this *only* disables the QA gating on bits output via # stdout. And we do support it in this configuration file as that may be a # convenient way to record the configuration which is used for such testing. #no-qa # Sections with a Device: prefix can be used to both enable and configure # individual devices. The following is the equivalent of passing the command # line option --device-id=XYZZY and if any options are specified for this # section, that is equivalent to passing them after the --device-id option in # that they will only apply to this device and no others. All the options for # the [Devices] section above may be used here. # # If no [Device:] sections are defined, the default is to operate on all of # the devices which are available. It is not an error to define a section for # a device which is not, or may not be, present at any given time. #[Device:XYZZY] # [Watch:] sections can be used to run our QA testing on some other external # source of random bits, provided we can read them as if they were a file or # named pipe or device node. 
Any number of Watch sections may be defined, # each just needs its own unique label after the Watch: prefix to identify it. # # For example, the following will run QA testing on the /dev/urandom device, # reading a block of 64kB every 500ms for analysis. The results of that # analysis are available from the control socket in the same way as the QA # reports on bits from the BitBabbler devices and the internal pools are. #[Watch:urandom] # The path to the device/pipe/file to read bits from. This must be set. #path /dev/urandom # How long to wait before reading the next block of bits. Default 0. #delay 500 # The number of bytes to read between each delay period. Default 64kB. #block-size 64k # The maximum number of bytes to read in total. Default 0 implies reading # an 'infinite' number of bits for as long as the process keeps running, # which is probably what you usually want when using this. #max-bytes 1G bit-babbler-0.9/doc/examples/0002755000000000000000000000000014136173163013007 5ustar bit-babbler-0.9/doc/examples/bbudp.pl0000755000000000000000000000253714136173163014450 0ustar #!/usr/bin/perl -w # A trivial example for obtaining random bits from the seedd UDP socket. # It assumes that the option '--udp-out 127.0.0.1:1200' was passed to a # runing instance of seedd. # # It takes one command line parameter, the desired number of random bytes, # and will output them formatted as hexadecimal digits. For example: # # $ ./bbudp 10 # read: 8c46b4d2a9a1424cd587 # # This file is distributed as part of the bit-babbler package. # Copyright 2015, Ron use strict; use IO::Socket; my $addr = '127.0.0.1'; my $port = 1200; my $max_msg_size = 32768; my $data; my $flags; my ($bytes_requested) = (shift // "") =~ /^(\d+)$/a or die "Usage: $0 \n"; die "Not reading 0 bytes\n" if $bytes_requested < 1; die "Maximum request is $max_msg_size\n" if $bytes_requested > $max_msg_size; my $sock = IO::Socket::INET->new( Proto => 'udp', PeerAddr => $addr, PeerPort => $port, ) or die "Could not create socket: $!\n"; # Send the requested number of bytes as a network-order short. my $msg = pack("n*", $bytes_requested); $sock->send($msg) or die "Failed to send request for $bytes_requested bytes: $!\n"; $sock->recv($data,$max_msg_size,$flags) or die "Failed to read datagram reply: $!\n"; # Display the binary octets as hex digits print "read: " . unpack("H*", $data) . "\n"; bit-babbler-0.9/doc/examples/random_int.pl0000755000000000000000000003217614136173163015510 0ustar #!/usr/bin/perl -w # This is mainly an example of one way to correctly generate random numbers in # an arbitrary integer range from entropy obtained by a BitBabbler. Since the # BitBabbler itself just outputs a continuous stream of random bits, some care # is needed if you want random numbers in a range which isn't a perfect power # of two, but still require every value to have an equal probability of being # selected. # # History gives us plenty of examples where naive attempts at generating random # numbers in some range (like people applying a simple modulus to the output of # rand(3) or a similar function) create an exploitable, or at least undesirable # bias - where some numbers will be selected more or less frequently than would # be expected by chance. So it's worth having an example of how to do it right. 
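# As a concrete (if contrived) illustration of that bias: mapping one raw byte
# into a range of 6 with a plain modulus, something like
#
#   my $biased = unpack("C", $raw_byte) % 6;   # $raw_byte: one byte of entropy
#
# would give the values 0 .. 3 a 43/256 chance each, but 4 and 5 only 42/256,
# because 256 does not divide evenly by 6. The rejection method used below
# avoids that skew by discarding out-of-range samples instead of wrapping them.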
# # The algorithm used here is not the most efficient known in terms of the input # entropy potentially needed to generate each number that is output - but it is # very simple and easy to not get wrong in your own code, and unless you need a # really large quantity of numbers quickly, the proportion of raw entropy which # may be wasted here won't usually be a cause for concern when the rate that we # can create it means it's not a scarce resource. # # The basic operation is quite trivial. Obtain enough bits from the BitBabbler # to give us a number that could be larger than the desired range. Check if it # is within the desired range. If it is, we're done. If it is not, we discard # those bits then obtain another set. Repeat until you have as many numbers as # you require. # # For a range that does not have zero as its smallest number, we can optimise a # little to only require enough bits to cover the distance between the smallest # number in the range and the largest one. # # The actually interesting parts of this example for that are all found in the # request_entropy() function, which gets raw bits from the BitBabbler via the # UDP socket interface of seedd, and the get_next_number() function, which does # the transformation to the desired range. (all the rest is mostly just sanity # checking the command line request and a self-test to sanity check the output) # # This example assumes seedd is providing a UDP socket on 127.0.0.1, port 1200, # (i.e. '--udp-out 127.0.0.1:1200' was passed to a running instance of it) and # has two main modes of operation: # # - If invoked with up to three command line arguments, it will output numbers # in the selected range. # # random_int.pl max [min] [count] # # Where: # # max: is the largest number (inclusive) in the desired range. # This argument must be provided. # # min: is the smallest number (inclusive) in the desired range. # If not provided, it will default to 0. # # count: is the quantity of numbers wanted in this range. # If not provided, it will default to 1. # # The generated numbers will be sent to stdout, separated by newlines. # All other output is sent to stderr. # # - If invoked with a fourth command line argument, it will operate in a self # test mode. # # random_int.pl max [min] [count] [test-count] # # In this mode, the 'count' argument is ignored, and enough numbers will be # generated in the min - max range to expect that an average of 'test-count' # occurrences of each possible value will be seen. # # A report will then be generated to stdout which shows the actual number of # occurrences seen for each value, and the spread of frequencies with which # the individual values were seen. # # On its own, this isn't a rigorous test of the quality of the distribution # of the output, but combined with the QA checking of the raw entropy that # is done by seedd, it should give an easy visual indication of any serious # problem that may be present in the transformation code for a given range. # # When all output is completed, the amount of entropy 'wasted' in obtaining the # desired numbers will be shown (also to stderr). On average it is expected # that this will be proportional to how far from being a power of two the range # of desired numbers is. The greater that distance, the higher the probability # is that we will randomly get (and so discard) a number that is outside of the # requested range. # # # This file is distributed as part of the bit-babbler package. 
# Copyright 2017, Ron use strict; use IO::Socket; use POSIX qw(ceil); use List::Util qw(reduce max); # The seedd UDP socket to connect to, and maximum allowed packet size. my $addr = '127.0.0.1'; my $port = 1200; my $max_msg_size = 32768; # Get max, min, and counts from the command line with some sanity checking. my ($max_val) = (shift // "") =~ /^(\d+)$/a or die "Usage: $0 [min] [count] [test-count]\n"; my ($min_val) = (shift // 0) =~ /^(\d+)$/a; my ($count) = (shift // 1) =~ /^(\d+)$/a; my ($testcount) = (shift // 0) =~ /^(\d+)$/a; die "Max value ($max_val) must be greater than min value ($min_val).\n" unless $max_val > $min_val; die "Count should be greater than 0 if you want more than this message.\n" unless $count > 0 || $testcount > 0; # Define some math convenience functions for the calculations we need to do. sub log2($) { return log(shift) / log(2); } sub log10($) { return log(shift) / log(10); } # Return the minimum number of bits needed to represent an integer value. sub bits_needed_for($) { # +1 because we want the number of bits needed to store the given value # not that number of values counting from zero. return ceil(log2((shift) + 1)); } # Return the number of bytes needed to hold some number of bits. sub bytes_needed_for($) { return ceil((shift) / 8); } # Return the number of decimal digits needed to output an integer value. sub digits_needed_for($) { return ceil(log10((shift) + 1)); } # Calculate how much entropy we need to obtain a number in the desired range # and how to manipulate it into that range. my $range = $max_val - $min_val; my $nbits = bits_needed_for($range); my $nbytes = bytes_needed_for($nbits); my $downshift = $nbytes * 8 - $nbits; # Tell the user what we are going to do. if ($testcount) { warn "Testing $testcount value" . ($testcount > 1 ? 's' : '') . " per bin between $min_val and $max_val.\n"; } else { warn "Requested $count value" . ($count > 1 ? 's' : '') . " between $min_val and $max_val.\n"; } # Do some final sanity checking on what was requested. die "Not reading 0 bytes\n" if $nbytes < 1; die "Maximum request is $max_msg_size\n" if $nbytes > $max_msg_size; # And report some detail about how we're going to do it. warn "Need to read $nbits significant bits ($nbytes bytes >> $downshift)" . " for range of $range.\n"; # We're ready, let's do this. Create a socket for obtaining entropy. my $sock = IO::Socket::INET->new( Proto => 'udp', PeerAddr => $addr, PeerPort => $port, ) or die "Could not create socket: $!\n"; # And define a convenience function to read some amount of entropy from it. sub request_entropy($) { my $bytes_requested = shift; my $data; my $flags; # Send the requested number of bytes as a network-order short. my $msg = pack("n*", $bytes_requested); $sock->send($msg) or die "Failed to send request for $bytes_requested bytes: $!\n"; $sock->recv($data,$max_msg_size,$flags) or die "Failed to read datagram reply: $!\n"; # And return them as a block of binary data. return $data; } # Keep some statistics on how efficiently we obtained the desired numbers. my $requests_made = 0; my $attempts_needed = 0; # This function is the actual meat of this example, turning raw entropy that is # read from the BitBabbler into a number within the desired range with an equal # probability for obtaining every number in the range. sub get_next_number() { my $padding = pack('C8', 0); ++$requests_made; # Loop until we get a number that is in the requested range. 
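    # As a worked example, assuming a requested range of 0 .. 5 purely for
    # illustration: $nbits would be 3, so each attempt yields one of 8 equally
    # likely values, 6 of which are accepted, and on average 8/6 (about 1.33)
    # attempts would be needed per number returned.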
# In theory, the number of requests this might take is unbounded, but in # practice, on average, the number of requests needed is proportional to # how far the requested range is from being the next larger power of 2. # (With enough samples, the average will converge on that proportion ever # more precisely, in the same way that Monte Carlo estimation of Pi does). while(1) { ++$attempts_needed; # Request the amount of entropy we need for the size of the range. my $entropy = request_entropy($nbytes); # Unpack the binary data as an unsigned "quad" (64 bit), little-endian # number, adding enough trailing (most significant bit) padding to the # entropy to ensure that we have at least 64 bits of data to unpack. # Then shift those bits to the right if needed, to obtain the smallest # (non byte aligned) number of bits needed to cover the whole range. # Finally, add the minimum value to that to put the result between the # desired floor and a ceiling greater than or equal to the maximum # value wanted. # # (For those who don't normally speak perl, the 'unpack' here is just # the equivalent of casting the raw data bits to a uint64_t type) my $n = (unpack("Q<", $entropy . $padding) >> $downshift) + $min_val; # Return the result if it is not larger than the range maximum (we # already know that it must be at least the minimum requested value). return $n if $n <= $max_val; # Otherwise, try again until we succeed. #warn " rejected $n\n"; } } # Finally, is this a test-run, or a request to output some numbers ... if ($testcount) { # We're in self-test mode to produce some statistics about the distribution # of numbers that are actually obtained in the selected range. We don't # do any numeric analysis here of whether the reported statistics are in a # normally expected range, but assuming that the raw entropy really is good # (which is already tested by the QA done in seedd), we would expect that # any bug in the transformation done here to map them to the desired range # should normally show up as a glaringly obvious glitch just by eye in what # we do already report here. This is just a way for people to quickly # reassure themselves that things probably are in fact working as expected. my $numbins = $range + 1; my $trials = $numbins * $testcount; my %bin; my %counts; warn "Collecting $trials test results, please wait ...\n"; while ($trials--) { # Show a progress spinner, because this could take a while # depending on the range and number of trials chosen for it. print STDERR "\rRemaining trials $trials " if $trials % 1000 == 0; ++$bin{get_next_number()}; } warn "\n\n"; # For each number in the desired range that was obtained, show how many # times it was returned in this trial. On average each number should be # seen roughly $testcount number of times. print " Frequency of each value:\n"; print " Note: not all values were observed at least once.\n" if $numbins != scalar(keys %bin); # Find the number of digits needed to align output with the maximum value my $dmax = digits_needed_for($max_val); for ($min_val .. $max_val) { if (exists $bin{$_}) { push @{$counts{$bin{$_}}}, $_; printf " %*d: %d\n", $dmax, $_, $bin{$_}; } else { # Report values that did not occur at all in the counts table, # but don't print them in this list. push @{$counts{0}}, $_; } } print "--------------------------\n\n"; # Find the number of digits needed to nicely format the frequency count # and the number of values that were seen with each frequency. my $maxcount = reduce { my $n = scalar(@{$counts{$b}}); $a > $n ? 
$a : $n } 0, keys %counts; my $dcount = digits_needed_for($maxcount); my $dfreq = digits_needed_for(max keys %counts); # For each number of times a value was seen, show the values that were # seen that number of times. For a sufficiently large number of trials # the number of values at each frequency should be normally distributed # around the expected average frequency of $testcount. print " Values at each frequency:\n"; for (sort { $a <=> $b } keys %counts) { printf " %*d: (%*d)", $dfreq, $_, $dcount, scalar(@{$counts{$_}}); printf " %*s", $dmax, $_ for @{$counts{$_}}; print "\n"; } } else { # Otherwise, just output the list of numbers requested with no other frills. while ($count--) { print get_next_number() . "\n"; } } # Give a final report on the rate of rejected attempts in the given range. printf STDERR "\nAverage number of attempts needed to obtain each number: %0.4f\n", $attempts_needed / $requests_made; # Or the same thing alternatively stated: #printf STDERR "\nProportion of rejected attempts for this numeric range: %0.2f%%\n", # (1 - $requests_made / $attempts_needed) * 100; # vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/doc/man/0002755000000000000000000000000014136173163011744 5ustar bit-babbler-0.9/doc/man/bbcheck.10000644000000000000000000002160114136173163013405 0ustar .\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BBCHECK 1 "May 29, 2017" .\" Please adjust this date whenever revising the manpage. .\" .\" Some roff macros, for reference: .\" .nh disable hyphenation .\" .hy enable hyphenation .\" .ad l left justify .\" .ad b justify to both left and right margins .\" .nf disable filling .\" .fi enable filling .\" .br insert line break .\" .sp insert n+1 empty lines .\" for manpage-specific macros, see man(7) .SH NAME bbcheck \- run automated tests on BitBabbler hardware RNG devices .SH SYNOPSIS .B bbcheck .RI [ options ] .SH DESCRIPTION The \fBbbcheck\fP utility is a tool for quickly and simply analysing the output of a BitBabbler RNG at various bitrates. It can run tests on multiple devices in parallel, and highlight the best and worst results from a series of tests on each device. .SH OPTIONS The following options are available: .TP .B \-s, \-\-scan Scan the system for available BitBabbler devices. .TP .BI "\-i, \-\-device\-id=" id Select a BitBabbler device to analyse by its unique ID. If no devices are explicitly specified then the default is to use all of them which are present when testing begins. This option may be passed multiple times to test multiple devices. The \fIid\fP may be the device serial number, or its logical address in the form: [\fIbusnum\fP:]\fIdevnum\fP or on systems where knowing the USB topology is supported, its physical address in the form \fIbusnum\fP\-\fIport\fP[.\fIport\fP\ ...] For a logical address the \fIbusnum\fP part is optional, but if \fIdevnum\fP is not unique across all buses, then exactly which device will be selected if it is not fully specified becomes a matter of chance. All of the available IDs which can be used to refer to a device will be reported by the \fB\-\-scan\fP option. Bus, device, and port numbers are expected to be decimal integers. Since \fBbbcheck\fP only operates on available devices and does not wait for a device to be hotplugged, it is an error to specify an ID which does not refer to a device currently available in the system. 
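.IP
For example (using purely illustrative addresses), \fB\-\-device\-id=3:7\fP
would select device 7 on bus 3, \fB\-\-device\-id=1\-4.2\fP the device attached
to port 4.2 of bus 1, and a bare serial number selects that particular device
wherever it happens to be plugged in.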
.TP .BI "\-r, \-\-bitrate=" min[:max] Select the bitrate, or range of bitrates, to analyse (in bits per second). The available bitrates are determined by an integer clock divider, so not every rate is exactly achievable. An unsupported rate will be rounded up to the next higher rate. For convenience the rate may be followed by an SI multiplier (eg. 2.5M for 2500000). If a colon separated range is specified, then all possible bitrates between \fImin\fP and \fImax\fP will be tested. .TP .BI "\-b, \-\-bytes=" n The number of bytes to analyse for each test. A suffix of 'k', 'M', or 'G' will multiply \fIn\fP by the respective power of two. .TP .BI "\-B, \-\-block\-size=" bytes The block size used for folding. This size must be a multiple of 2^n, where n is the level of folding used (ie. it must be able to fold the desired number of times without any remainder). A suffix of 'k', 'M', or 'G' will scale \fIbytes\fP by the respective power of two. Default is 64kB. A larger block size will mix samples taken over a longer timescale. At high bitrates this will mean more of the lower frequency noise can be factored into each sample where otherwise the higher frequency noise would dominate. The optimum block size (beyond which any change to the result is negligible) for any given bitrate is still a matter that is ripe for further study. See the \fB\-\-fold\fP option below for a more detailed description of folding. .TP .B \-A, \-\-all\-results Show all the test results, not just the final summary. .TP .B " \-\-no\-colour" Don't colour the final results. By default the four best results will be highlighted bright-green, dull-green, yellow, and orange, while the worst result will be highlighted in red. This option suppresses the output of terminal escape codes which are responsible for that. .TP .B \-v, \-\-verbose Make more noise about what is going on internally. If used (once) with the \fB\-\-scan\fP option this will show more information about each device, but otherwise it's mostly only information useful for debugging. It may be passed multiple times to get swamped with even more information. .TP .B \-?, \-\-help Show a shorter version of all of this, which may fit on a single page. .TP .B " \-\-version" Report the \fBbbcheck\fP release version. .SS Per device options The following options may be used multiple times to individually configure each device when more than one BitBabbler is available. If passed before any \fB\-\-device\-id\fP option, then they set new default values which will apply to every device. If passed after one of those options they will only be applied to the immediately preceding device. .TP .BI " \-\-latency=" ms Override the calculated value for the USB latency timer. This controls the maximum amount of time that the device will wait if there is any data in its internal buffer (but less than a full packet), before sending it to the host. If this timer expires before a packet can be filled, then a short packet will be sent to the host. The default value is chosen to ensure that we do not send more short packets than necessary for the selected bitrate, since that will increase the number of packets sent and the amount of CPU time which must be spent processing them, to transfer the same amount of data. Unless you are experimenting with changes to the low level code, there is probably no reason to ever use this option to override the latency manually. .TP .BI "\-f, \-\-fold=" n Set the number of times to fold the BitBabbler output before analysing it. 
Each fold will take the first half of the block that was read and XOR it with the bits in the second half. This will halve the throughput, but concentrate the available entropy more densely into the bits that remain. There are two main things this is expected to do based on the BitBabbler design. It will better mix the low-frequency noise that is captured with that of the higher frequencies, allowing it to sample at higher bitrates without narrowing the noise bandwidth available to influence adjacent bits. It will help to break up any transient local correlations that might occur in the physical processes from which ambient environmental noise is collected. Folding should never reduce the real entropy of each sample, but when all is working exactly as it should, it may not do anything to increase it either. Mathematically, an XOR summation is expected to exponentially smooth any bias in a stream of independent bits, with the result having at least as much entropy as the least predictable of either of the two inputs (in the same way that a one time pad is no less secure despite the plaintext having much less entropy than the pad does). .TP .BI " \-\-enable\-mask=" mask Select a subset of the generators on BitBabbler devices with multiple entropy sources. The argument is a bitmask packed from the LSB, with each bit position controlling an individual source, enabling it when set to 1. As a special case for \fBbbcheck\fP, if a \fImask\fP of 0 is used, then the tests will be performed on each source unit individually. A \fImask\fP of 16 (0x10) will first test each source individually, and then all of them enabled together as well. .TP .B " \-\-limit\-max\-xfer" Limit the maximum transfer chunk size to 16kB. On Linux, prior to kernel 3.3, individual bulk transfer requests were somewhat arbitrarily limited to 16kB. With kernels later than that, larger transfers were supported, and we will make use of those to optimise transfer speeds for large requests at high bitrates when a new enough kernel and libusb with support for this are both available. Unfortunately, the USB chipsets on some motherboards are still buggy when these larger transfers are used, and there is no easy way for us to automatically detect those, since the symptoms they exhibit do vary and aren't always directly visible to our code. Ideally any problems like that should be reported to the kernel maintainers, where they can be fixed, or worked around for device specific quirks, but this option allows you to explicitly request that the transfer size be limited on machines where you are experiencing problems with that. If in doubt, it is safe to use this option on any system, the impact on transfer speed is relatively minimal unless you are trying to obtain huge numbers of bits as quickly as possible. But it's not the default, since at present only a very small number of systems are still known to be affected, and that number should continue to decrease over time. .SH SEE ALSO .BR seedd (1), .BR bbctl (1). .SH AUTHOR .B bbcheck was written by Ron . You can send bug reports, feature requests, praise and complaints to support@bitbabbler.org. bit-babbler-0.9/doc/man/bbctl.10000644000000000000000000001507614136173163013123 0ustar .\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BBCTL 1 "January 24, 2018" .\" Please adjust this date whenever revising the manpage. 
.\" .\" Some roff macros, for reference: .\" .nh disable hyphenation .\" .hy enable hyphenation .\" .ad l left justify .\" .ad b justify to both left and right margins .\" .nf disable filling .\" .fi enable filling .\" .br insert line break .\" .sp insert n+1 empty lines .\" for manpage-specific macros, see man(7) .SH NAME bbctl \- query and control tool for BitBabbler hardware RNG devices .SH SYNOPSIS .B bbctl .RI [ options ] .SH DESCRIPTION The \fBbbctl\fP program can be used to issue command requests to the control socket of software controlling a BitBabbler device (such as the \fBseedd\fP(1) daemon). .SH OPTIONS The following options are available: .TP .B \-s, \-\-scan Scan for active devices. This will report the device identifiers which can be queried from the owner of the control socket. .TP .BI "\-i, \-\-device\-id=" id Act on only the specified device. If no devices are explicitly specified then the default is to act upon all of them. This option may be passed multiple times to act on some subset of the available devices. The \fIid\fP must be an identifier name as reported by \fBbbctl\ \-\-scan\fP, you cannot use device logical or physical addresses here. .TP .B \-b, \-\-bin\-freq Report the 8-bit symbol frequencies. .TP .B \-B, \-\-bin\-freq16 Report the 16-bit symbol frequencies. .TP .B " \-\-bin\-count" Report the 8-bit symbol counts. Similar to \fB\-\-bin\-freq\fP except the bins are reported in symbol order instead of sorted by frequency. .TP .B " \-\-bin\-count16" Report the 16-bit symbol counts. Similar to \fB\-\-bin\-freq16\fP except the bins are reported in symbol order instead of sorted by frequency. .TP .BI " \-\-first=" n Show only the first \fIn\fP results. Useful when you don't want to actually see all 65 thousand entries for the 16-bit bins. The default (if neither this nor the \fB\-\-last\fP option are specified) is to report everything in its full glory. Don't say I didn't warn you. .TP .BI " \-\-last=" n Show only the last \fIn\fP results. Useful when you don't want to actually see all 65 thousand entries for the 16-bit bins. If used together with the \fB\-\-first\fP option, then both the requested head and tail of the results will be shown. .TP .B \-r, \-\-bit\-runs Report on runs of consecutive bits. .TP .B \-S, \-\-stats Report general QA statistics. .TP .BI "\-c, \-\-control\-socket=" path The filesystem path for the service control socket to query. This can belong to any process that supports the BitBabbler control socket interface and for which the user running \fBbbctl\fP has permission to connect to. An address of the form \fItcp:host:port\fP may be used if the control socket is bound to a TCP port rather than a unix domain socket path. The \fIhost\fP part can be a DNS hostname or address literal. If an IPv6 address literal is used it should be enclosed in square brackets (e.g. tcp:[::1]:2020 to bind to port 2020 on the local IPv6 interface). The \fIport\fP can be a port number or a service name (as defined in \fI/etc/services\fP or other system name-service databases which are queried by \fBgetaddrinfo\fP(3)). .TP .BI "\-V, \-\-log\-verbosity=" n Change the logging verbosity of the control socket owner. .TP .BI " \-\-waitfor=" device : passbytes : retry : timeout This option will make \fBbbctl\fP wait before exiting until the \fBseedd\fP(1) QA checking reports that at least \fIpassbytes\fP of good entropy have been obtained from the given \fIdevice\fP. 
It will check for that every \fIretry\fP milliseconds, waiting for a maximum of \fItimeout\fP milliseconds before failing. The \fIdevice\fP is a QA test identifier as reported by \fB\-\-scan\fP, and must be provided, as must the expected \fIpassbytes\fP count. The \fIretry\fP time is optional, and if not specified it will default to 1000 milliseconds. If the \fItimeout\fP is 0 (or not explicitly passed), then this will wait for an unbounded amount of time for the requested condition to occur. The \fIpassbytes\fP, \fIretry\fP, and \fItimeout\fP parameters may be suffixed with an SI multiplier (e.g. k, M, G) as a convenience, so a \fItimeout\fP of 30k would wait for 30 seconds. This option may be passed multiple times to wait for multiple devices, and the given conditions for each of them will be tested for in the order that they are specified on the command line. i.e. Later conditions will not be tested for at all until all prior ones have been met, and the \fItimeout\fP clock for each test only begins after the previous test has successfully completed. When all required conditions pass, \fBbbctl\fP will report success with an exit code of 0. If a \fItimeout\fP is exceeded, or any other error occurs which means the test cannot be successfully completed (like passing a \fIdevice\fP which does not exist, or querying a \fB\-\-control\-socket\fP which no process provides), then a non-zero exit code will be returned. This option mostly exists to make it possible to delay or even prevent other services from starting until a sufficient amount of entropy has been obtained to feel comfortable that they can operate securely or as intended. See the notes on \fBBOOT\ SEQUENCING\fP in \fBseedd\fP(1) for more details on that. It may be used for other purposes too, but note that \fIpassbytes\fP is an absolute measure of the number of good bytes seen since \fBseedd\fP was started, it is not relative to the number that were obtained prior to executing this request. .TP .B \-v, \-\-verbose Make more noise about what is going on internally. It may be passed multiple times to get swamped with even more information. .TP .B \-?, \-\-help Show a shorter version of all of this, which may fit on a single page. .TP .B " \-\-version" Report the \fBbbctl\fP release version. .SH FILES .TP .I /run/bit\-babbler/seedd.socket The default \fB\-\-control\-socket\fP path if not explicitly specified. This may be under \fI/var/run\fP on platforms which don't (yet) provide a \fI/run\fP top level directory (or a TCP socket on platforms which don't support unix domain sockets). It is set at compile time by \fBSEEDD_CONTROL_SOCKET\fP. .SH SEE ALSO .BR seedd (1). .SH AUTHOR .B seedd was written by Ron . You can send bug reports, feature requests, praise and complaints to support@bitbabbler.org. bit-babbler-0.9/doc/man/bbvirt.10000644000000000000000000004346114136173163013324 0ustar .\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH BBVIRT 1 "January 2, 2018" .\" Please adjust this date whenever revising the manpage. 
.\" .\" Some roff macros, for reference: .\" .nh disable hyphenation .\" .hy enable hyphenation .\" .ad l left justify .\" .ad b justify to both left and right margins .\" .nf disable filling .\" .fi enable filling .\" .br insert line break .\" .sp insert n+1 empty lines .\" for manpage-specific macros, see man(7) .SH NAME bbvirt \- hotplug BitBabbler devices into libvirt managed domains .SH SYNOPSIS .B bbvirt .I action .RI [ options ] .BR "bbvirt attach" | detach .I device .RI [ options ] .BR "bbvirt attach\-all" | detach\-all .RI [ domain ] .RI [ options ] .SH DESCRIPTION The \fBbbvirt\fP program is an attempt to take some of the pain out of what is currently required to distribute multiple USB devices between the host and guest virtual machines. While there are several ways in which this may be configured and managed, at present none of them actually provide a complete and coherent solution on their own, all of them fall short of the mark in some significant and annoying way. The aim here is to piece together enough of those hacks to actually get all of the functionality that we want now, until the libvirt native support for this improves enough to not need it anymore. At present this deals with libvirt managed QEMU/KVM virtual machines. .SS What do we want? The ideal behaviour here is pretty simple. Given some arbitrary number of BitBabbler devices, we should be able to assign them to either the host machine, or to a guest VM running on it, and once we do that they should behave in the normal manner expected of any USB device. .IP - 2 If they are plugged in when the guest machine is started, they should be seen by that machine as they would be by the host. .IP - 2 If they are plugged in after the machine is started, they should be hotplugged into that machine as they would be on the host. .IP - 2 If they are unplugged while the machine is running, they should be cleanly removed from it, as they would be on the host. .SS Why can't we have it? Right now, libvirt gives us two ways that we can assign USB devices from the host to a guest domain. .IP - 2 We can assign them by their USB vendor and product ID. But that only works when there is just a single device of that type in the host. Which is pretty useless in most of the cases that we care about here, where the host and each of the guests are likely to have one or more BitBabbler devices of their own assigned to them. .IP - 2 We can assign them by their logical address on the USB bus. But that isn't a constant that we can statically configure for the domain. Every time a device is plugged in, or replugged, or reset, or the host machine is rebooted, that address is likely to change since it is dynamically allocated when the device is enumerated on the bus. .PP There is a third way, but it relies on bypassing the normal libvirt configuration to make direct use of the QEMU ability to assign a device by its physical address on the bus. Which is better, but still not a magic bullet since it relies on plugging exactly the same devices into exactly the same ports every time (and on having those ports enumerated in the same way by the host on every reboot, which isn't guaranteed either). It also forces us to jump through other hoops, since we then need additional complication to manage the access permissions of the device manually outside of libvirt, but still in coordination with it. The even bigger failing, which all of those methods have in common, is they all depend on the device already being plugged in before the guest is started. 
If it is inserted after the guest is started, or removed and replugged while the guest is running, or if the host bus or a hub bounces causing a reconnect, then the device will not be (re)attached to the guest. The only way to fix that if it happens is to manually reattach the device with an arcane incantation in XML (which relies on you knowing the new address of the device), or to completely power down and restart the guest. Not the pinnacle of user-friendly operation that we are looking for here. .SS What can we do about it? There was a patch submitted to libvirt some years back which would have allowed a device to be specified by both its USB product ID and its serial number, but that got some push-back, and so far has still not been applied upstream. That would have gone a long way toward making this both easy and clean, leaving us only with the hotplug aspect to deal with. We'll leave grumpy snark about that as an exercise for the reader\ ... Another alternative is we can delegate finding the device's logical address to a hotplug manager like \fBudev\fP(7). This is attractive in the sense that we can know when the address of a device changes and what it changes to, but \fBudev\fP itself isn't very friendly to the idea of local admin customisation (while it is possible to do, it seems to be getting increasingly strongly discouraged) and using it still requires some external glue to translate its events into something that libvirt can act on to configure the guest machine. The \fBbbvirt\fP program provides that glue, and a user friendly method of assigning which devices should belong to which guest domains, and a front end that can be invoked manually or by other admin controlled tasks to quickly and easily add or remove BitBabbler devices from any of the running guest machines. But the limitation this approach has, is that it can't easily know when a guest machine is started which should have devices that are already plugged in added to it. In theory we could add them to its persistent domain definition, but that has its own problems because we can only add devices by their ephemeral logical address, and we can't guarantee that we will get called to remove them from the domain again when that address becomes invalid (like if the host is suddenly powered off or it is otherwise not cleanly shut down), so we could end up with many stale entries accumulating in the persistent domain configuration, which could later match some completely different device to what we had wanted attached to it. Which means until that somehow gets fixed, it's only safe to add them to a live guest domain, so that they will always be removed again when it is halted, no matter how it ended up getting halted. Clearly we've still got some way to go to get to our ideal here. .SS What if we hit it with *two* hammers? There appears to be only two ways that we can get notified of a guest machine being started at present. One involves running yet another daemon process, which would do little more than just sit around waiting for someone to start a guest so it could tell us about that. But then we'd have yet another thing to configure, yet another process running, and yet more problems with figuring out how to ensure we don't lose a race when the host is booted, between getting the initial set of device events, that process being ready and active, and any guests that will be autostarted at boot actually starting. The other way is to use a libvirt hook. 
Which in turn has the problem of not actually allowing us to run any libvirt functions from it, which we need to do in order to attach the device to the host. And which we can't guarantee that we can just install by default, because there can be only one such hook on the system, which the local admin may already be using ... There is a third way, but that would involve requiring the local admin to start all guest machines through a wrapper of our own, instead of via whatever mechanism they already know and use. Which doesn't scale to support other USB devices in the same situation, among the many ways that would be a horrible solution to inflict on people. But there is a loophole we can exploit. We can use the libvirt qemu hook to trigger a change event for \fBudev\fP, which can in turn invoke \fBbbvirt\fP in much the same way that would happen if the device was really hotplugged, which gives us the extra layer of indirection we need to be able to safely do that from the hook. Rube Goldberg would be proud, and some of the pieces may require hand-assembly, but with all of this in place, we can have something resembling normal USB functionality in the guest machines. It's not pretty, but it will work with what we have to work with. .SS Ok, just tell me where to hit it. To string this together, you'll need to ensure all of the following: .IP - 2 The \fBudev\fP(7) rules from the bit\-babbler package are installed. If you installed this from the Debian packages that should already be done. If you didn't, you will need to install the rules that are found in \fIdebian/bit\-babbler.udev\fP from the source package to a suitable place on your system (probably \fI/etc/udev/rules.d\fP). .IP - 2 The \fBbbvirt\fP(1) script is installed in a place where the \fBudev\fP rules will find it. If you didn't install this from the Debian packages, and it isn't in \fI/usr/bin\fP, then you'll need to tweak the \fBudev\fP rules to suit. .IP - 2 The devices you wish to use in guest machines, and the machines you wish to use them in, are specified in the \fBbbvirt\fP configuration file. The default location for that is \fI/etc/bit\-babbler/vm.conf\fP. If you wish to use a different file you will need to pass its location with the \fB\-\-config\fP option in the \fBudev\fP rules, and update the hook script use that file too. The details of what you can put in that file are described in the \fBCONFIGURATION OPTIONS\fP section below. .IP - 2 The libvirt hook file is installed. If all the above is done, then devices will be added to the running guest machines if they get plugged in while the guest is running. This last step ensures devices which are already plugged in will be added to newly started guests too (which includes guests that are started automatically when the host machine boots). Until there is some safe way we can install this without conflicting with or overwriting an existing hook, everyone will need to do this step manually. If you have installed the Debian packages, then the example hook script that we've provided for this can be found in \fI/usr/share/doc/bit\-babbler/examples/qemu\-hook\fP. If you didn't it can be found in \fIlibvirt/qemu\-hook\fP of the source package. You will need to install that file as \fI/etc/libvirt/hooks/qemu\fP, or merge its content with the existing \fIqemu\fP file there if you already have that hook set. If that file did not previously exist, you will need to restart \fBlibvirtd\fP(8) to get it to begin using it. 
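.PP
For the configuration file step above, a purely illustrative sketch (the guest
name and serial numbers are placeholders, and the variables are detailed in the
\fBCONFIGURATION OPTIONS\fP section below) of a minimal
\fI/etc/bit\-babbler/vm.conf\fP assigning two devices to a guest domain named
\fItestvm\fP might look like:
.PP
.nf
    DOMAIN_URI_testvm="qemu:///system"
    DOMAIN_RNG_testvm=( "EXAMPLE01" "EXAMPLE02" )
.fi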
.PP That should cover all of the needed automation, but you can also attach and detach devices manually at any time too. The details of doing that will be described in the following section. Otherwise, with all the above done, there is no other reason to need to invoke \fBbbvirt\fP directly. .SH OPTIONS There are two primary modes of operation for \fBbbvirt\fP which are selected by the initial action option. If the action to perform is \fBattach\fP or \fBdetach\fP then only a single device will be acted upon, and which device that should be must be specified explicitly, even if there is only one device present on the host at the time. When invoking \fBbbvirt\fP manually, the \fIdevice\fP may be specified by its serial number, its logical address on the bus (in the form \fIbusnum\fP:\fIdevnum\fP, given as decimal integers), or its physical address on the bus (in the form \fIbusnum\fP\-\fIport\fP[\fI.port\fP\ ...]). If the action to perform is \fBattach\-all\fP or \fBdetach\-all\fP, then the device(s) to act upon are selected by \fIdomain\fP association instead. If a \fIdomain\fP is explicitly specified, then all devices which are assigned to that guest domain in the configuration file will be acted upon in the same way as if \fBbbvirt\fP was invoked for each of them individually with the \fBattach\fP or \fBdetach\fP action. If no \fIdomain\fP is provided, then all of the configured guest domains will be acted upon in this way. The following additional options are available: .TP .B \-C, \-\-config Specify an alternative configuration file to import the device assignments from. If the path to the file is not provided explicitly, then it will be looked for in the \fI/etc/bit\-babbler\fP directory (with a \fI.conf\fP suffix). .TP .BI "\-c, \-\-connect=" URI Specify the \fBvirsh\fP(1) connection \fIURI\fP to use. This will override a \fBDOMAIN_URI\fP set for the domain in the configuration file. If that is not set using either of these methods then the \fBvirsh\fP default for the user running \fBbbvirt\fP will be used. .TP .BI "\-D, \-\-domain=" name Specify the libvirt domain to act upon. This may be used to override the device allocation from the configuration file when \fBbbvirt\fP is invoked manually, or to act on a device or domain that is not currently specified in the configuration file. .TP .BI "\-b, \-\-busnum=" num Specify the USB bus number that the device is attached to. This option is mostly used to avoid \fBbbvirt\fP needing to look this up when it is already known (such as when it is called from a \fBudev\fP rule). There isn't usually much reason to pass this if invoking \fBbbvirt\fP manually, since you can just specify the device by its logical or physical address instead. .TP .BI "\-d, \-\-devnum=" num Specify the USB device number that the device is currently assigned. Together with the bus number, this forms the logical address of the device. This option is mostly used to avoid \fBbbvirt\fP needing to look this up when it is already known (such as when it is called from a \fBudev\fP rule). There isn't usually much reason to pass this if invoking \fBbbvirt\fP manually, since you can just specify the device by its logical address instead. .TP .B \-n, \-\-dry\-run Don't attach or detach any devices, just show what would be attempted if this was a live run. This option implies a minimal level of \fB\-\-verbose\fP, but the verbosity may be increased further by also passing that option explicitly. .TP .B \-v, \-\-verbose Make more noise about what is really going on. 
It may be passed multiple times to increase the verbosity further. .TP .B \-?, \-\-help Show a brief summary of the available options. .SH CONFIGURATION OPTIONS The \fBbbvirt\fP configuration file contains variable assignments using the \fBbash\fP(1) shell syntax. It is sourced as a shell snippet, so you could in principle construct the configuration for each domain dynamically, but most typically a simple static assignment of devices to domains will suffice. If you do elect to run code in it, you should be very defensive about namespacing any other variables you use, or any other side effects you might cause to happen. Any number of guest domains may be configured in it. For each guest domain, three variables control the behaviour of \fBbbvirt\fP: .TP .BI DOMAIN_NAME_ domain = guestname This variable is optional if \fIguestname\fP and \fIdomain\fP are the same. It must be used if the libvirt guest name contains any characters which would not be valid for use as a shell variable name (i.e. anything that is not ASCII a-z, A-Z, 0-9, or the underscore). If set, it indicates that the corresponding \fBDOMAIN_*_\fP\fIdomain\fP variables shown below are configuration for the libvirt guest domain \fIguestname\fP instead of one with the name \fIdomain\fP. When specifying a domain option for \fBbbvirt\fP to act upon, you may use either of the \fIdomain\fP or \fIguestname\fP identifiers interchangeably. .TP .BI DOMAIN_URI_ domain = URI This variable is optional, and sets the \fBvirsh\fP(1) connection \fIURI\fP to use when attaching or detaching devices from the given \fIdomain\fP. If the \fB\-\-connect\fP option is explicitly passed to \fBbbvirt\fP it will override what is set here. If the connection \fIURI\fP is not set using either of these methods then the \fBvirsh\fP default for the user running \fBbbvirt\fP will be used (which would normally be root if run from \fBudev\fP). .TP .BI DOMAIN_RNG_ domain =( " device serial numbers \fR... " ) This variable is required if automatic passthrough of devices to a domain is desired. It is a bash array, populated with a space separated list of all the device serial numbers that you want assigned to \fIdomain\fP. It is not an error for devices to be listed here which are not currently plugged in. It is important to ensure that devices are only assigned to one \fIdomain\fP though, and that devices assigned to guest domains will not be used by a \fBseedd\fP(1) instance running on the host (which means the \fBseedd\fP configuration needs to be passed an explicit list of the devices that it may use too). The device serial number must always be used here. You cannot specify a device by its logical or physical address on the bus (like you can in most other places where we take a device ID). .SH FILES .TP .I /etc/bit\-babbler/vm.conf The default configuration file for assigning BitBabbler devices to libvirt managed virtual machine domains. .TP .I /lib/udev/rules.d/60\-bit\-babbler.rules The default \fBudev\fP(7) rules granting direct device access to users in the group \fBbit\-babbler\fP, enabling USB autosuspend when the device is idle, and invoking \fBbbvirt\fP to handle device hotplug for virtual machines. These can be overridden by creating \fI/etc/udev/rules.d/60\-bit\-babbler.rules\fP and populating it with your own rules. .TP .I /etc/libvirt/hooks/qemu The libvirt hook script needed to enable cold-plugging of already present devices into newly (re)started virtual machines. .SH SEE ALSO .BR seedd (1), .BR virsh (1). .SH AUTHOR .B bbvirt was written by Ron . 
You can send bug reports, feature requests, praise and complaints to support@bitbabbler.org. bit-babbler-0.9/doc/man/seedd.10000644000000000000000000015730314136173163013121 0ustar .\" Hey, EMACS: -*- nroff -*- .\" First parameter, NAME, should be all caps .\" Second parameter, SECTION, should be 1-8, maybe w/ subsection .\" other parameters are allowed: see man(7), man(1) .TH SEEDD 1 "Jan 11, 2018" .\" Please adjust this date whenever revising the manpage. .\" .\" Some roff macros, for reference: .\" .nh disable hyphenation .\" .hy enable hyphenation .\" .ad l left justify .\" .ad b justify to both left and right margins .\" .nf disable filling .\" .fi enable filling .\" .br insert line break .\" .sp insert n+1 empty lines .\" for manpage-specific macros, see man(7) .SH NAME seedd \- read entropy from BitBabbler hardware RNG devices .SH SYNOPSIS .B seedd .RI [ options ] .SH DESCRIPTION The \fBseedd\fP program can be run as a foreground process or as a daemon to collect entropy from one or more BitBabbler devices, either streaming it to \fIstdout\fP for general purpose use, making it available on a UDP socket, or directly seeding the kernel entropy pool with it on demand. .SH USAGE The number of configurable options for \fBseedd\fP has now outgrown what most people will care about or want to use, which would normally be less than ideal for something like this, but it does have a rather diverse range of user needs, and it is important that we support those well. Unless you fall into the special use category, then the following examples are probably about all (or still more than) you might ever need: Show all available BitBabbler devices, in detail: seedd \-sv (or \-\-scan \-\-verbose) Output 1 million bytes to a file, drawn from all available devices: seedd \-b 1000000 > random\-bytes.out Stream entropy continuously to \fIstdout\fP (with no control socket): seedd \-o \-c none | your-thing-reading-stdin Run as a daemon, feeding entropy to the OS kernel pool: seedd \-k \-d To read from only specific devices, add the \fB\-\-device\-id\fP option too. .SH OPTIONS The following options are available: .TP .B \-s, \-\-scan Scan the system for available BitBabbler devices, reporting them in a human readable format. .TP .B " \-\-shell\-mr" Scan the system for available BitBabbler devices, reporting them in a machine readable format that is suitable for importing into shell scripts. .TP .BI "\-C, \-\-config=" file Read configuration options from a \fIfile\fP. Details of the contents of that file is described in the \fBCONFIGURATION FILE FORMAT\fP section below. This option may be passed multiple times to import configuration from multiple files, and precedence of the options set in them is the same as if they were passed on the command line at the point where this option is used. Where option settings are duplicated, the last one seen is the one which will be applied. Which means if you want options on the command line to override any setting in the config file(s), they must be passed after this option. Options which are cumulative will continue to accumulate wherever they are seen, so for example, passing an additional \fB\-\-device\-id\fP on the command line will simply add an extra device, it will not override the use of any devices defined in the configuration file. But you can still override any per-device options which may be set there. 
The only exception to this is \fB\-\-verbose\fP, where the verbosity level selected on the command line will always override any setting from a configuration file regardless of the order they appear in. .TP .BI "\-i, \-\-device\-id=" id Select a BitBabbler device to read from by its unique ID. If no devices are explicitly specified then the default is to use all of them (including any devices that may be plugged in at a later time). This option may be passed multiple times to attach to multiple devices. It is not an error to specify a device that is not currently present on the system. If hotplug support was enabled at compile time and available on the system at runtime, then such devices will be added to the pool at runtime if they are later plugged in. The \fIid\fP may be the device serial number, or its logical address in the form: [\fIbusnum\fP:]\fIdevnum\fP or on systems where knowing the USB topology is supported, its physical address in the form \fIbusnum\fP\-\fIport\fP[.\fIport\fP\ ...] For a logical address the \fIbusnum\fP part is optional, but if \fIdevnum\fP is not unique across all buses, then exactly which device will be selected if it is not fully specified becomes a matter of chance. All of the available IDs which can be used to refer to a device will be reported by the \fB\-\-scan\fP option. Bus, device, and port numbers are expected to be decimal integers. The logical address isn't usually very useful to use when hotplug activity is expected, since it is allocated dynamically and is 'unpredictable' for most purposes here. .TP .B \-d, \-\-daemon Fork to the background and run as a daemon process. If this option is not specified then \fBseedd\fP will remain in the foreground. This option will be ignored if \fBseedd\fP is started by \fBsystemd\fP(1) as a service using the \fBnotify\fP start-up type. .TP .BI "\-b, \-\-bytes=" n Send \fIn\fP bytes of entropy to \fIstdout\fP. The process will exit when that is completed. This option will be ignored if either the \fB\-\-kernel\fP or \fB\-\-udp\-out\fP options are used. A suffix of 'k', 'M', or 'G' will multiply \fIn\fP by the respective power of two. If this option is not used, then entropy will be output until the process is explicitly terminated (or receives SIGPIPE). Passing this option implies \fB\-\-stdout\fP, and also \fB\-\-control\-socket=none\fP unless the control socket option is explicitly passed to enable it. .TP .B \-o, \-\-stdout Stream entropy directly to \fIstdout\fP. If the \fB\-\-bytes\fP option is not used, this will output an unlimited stream of entropy until the process is halted. .TP .B \-k, \-\-kernel Feed entropy directly to the kernel \fI/dev/random\fP pool on demand. .TP .B " \-\-ip\-freebind If enabled, this option allows binding to an IP address that is non-local or does not (yet) exist. This permits the \fB\-\-udp\-out\fP and TCP \fB\-\-control\-socket\fP options to be configured to listen on an IP socket without requiring the underlying network interface to be up, or the specified IP address configured, at the time that \fBseedd\fP will try to bind to it. Which can be useful if you want \fBseedd\fP to be started as early as possible when the system is booted, without waiting for network configuration to occur. This option has no effect if \fBseedd\fP is not configured to listen and provide a service on any IP address, and it is not supported on all platforms. 
A warning will be logged if it is enabled on an unsupported platform, but that is not a fatal error (however actually attempting to bind to an unconfigured address quite likely still will be on most sane platforms). On Linux, any user may enable this option. On OpenBSD it requires superuser privilege, and on FreeBSD it requires \fBPRIV_NETINET_BINDANY\fP privileges. It is a fatal error to attempt to enable this with insufficient privilege. If this option is not enabled, then it is a fatal error for the \fB\-\-udp\-out\fP service or a TCP \fB\-\-control\-socket\fP to be bound to an address which is not already configured when seedd is started. This is a configurable option because in different circumstances both behaviours can be the more desirable choice. There is value in strong sanity checking of an address which is always supposed to already be available for use. .TP .BI "\-u, \-\-udp\-out=" host : port Bind a UDP socket to the given address, which clients can use to request blocks of entropy directly from the internal pool. The \fIhost\fP part can be a DNS hostname or address literal. If an IPv6 address literal is used it should be enclosed in square brackets (e.g. [::1]:2020 to bind to port 2020 on the local IPv6 interface). The \fIport\fP can be a port number or a service name (as defined in \fI/etc/services\fP or other system name-service databases which are queried by \fBgetaddrinfo\fP(3)). To obtain entropy from this port, write the desired number of bytes to it as a two-octet network-order short integer. It will reply with a datagram containing the requested number of bytes of entropy. Requests for 1 to 32768 bytes will be honored as soon as there is sufficient entropy in the internal pool to do so. Requests outside of that range are invalid and will simply be ignored. Note that no access control is placed on the socket, so if it uses a publicly accessible address anyone will be able to read entropy from it (and potentially to use it as a traffic amplifier if requests use a forged source address). This facility is mainly provided for use on operating systems like Windows, where the native interfaces may be of questionable usefulness or quality and cannot be audited - but it is generic and so can be used on any system where obtaining entropy directly from the BitBabbler devices might be desirable. On Linux systems we do recommend using the system \fI/dev/(u)random\fP interfaces though, since that will mix in other entropy and transparently benefit all existing applications. They aren't mutually exclusive though, you can use both this and the \fB\-\-kernel\fP option together too. .TP .BI "\-c, \-\-control\-socket=" path Set the filesystem path for the query and control socket that may be used to obtain information and statistics about the performance of the BitBabbler devices and control some aspects of the running process. The special value of \&'none' may be passed to disable the creation of a control socket. Mostly this option is useful if you have more than one \fBseedd\fP process running which are each controlling different sets of devices. On systems where unix domain sockets are not available, or if you wish to make the control socket visible to other machines on the network, you can instead use a string of the form \fItcp:host:port\fP, where the \fIhost\fP and \fIport\fP parts are as described in the \fB\-\-udp\-out\fP option above. 
Note that there is no access control when a TCP socket is used, so any user on any machine that is able to connect to this port will be able to do anything the control socket allows. If this option is used to change the default control-socket address, then you will also need to explicitly specify the new address to other tools accessing it too, like \fBbbctl\fP(1) and the \fBmunin\fP plugin. .TP .BI " \-\-socket\-group=" group Permit access to the control socket by members of the named \fIgroup\fP. If this option is not specified, then only the owner of the \fBseedd\fP process will be able to connect to that socket. The \fBadm\fP group may be a reasonable choice to set this to on many systems (it is the default used by the Debian package init scripts), but you are free to use any group for this which best suits local access policies. This option has no effect if a TCP port is used for the control socket instead of a unix domain socket path. .TP .B \-v, \-\-verbose Make more noise about what is going on internally. If used (once) with the \fB\-\-scan\fP option this will show more information about each device, but otherwise it's mostly only information useful for debugging. It may be passed multiple times to get swamped with even more information. As an exception to the handling of most cumulative options, this one will override any previous \fBverbose\fP setting read from a configuration file, not accumulate additional verbosity on top of that. .TP .B " \-\-version" Report the \fBseedd\fP release version. .TP .B " \-\-gen\-conf" Output text (to \fIstdout\fP) suitable for use as a configuration file based on the command line options that were passed. None of those options will actually be acted upon, \fBseedd\fP will simply exit after it has output a configuration which would do the same thing as that set of options. Any command line options which have no equivalent configuration file option will simply be ignored (i.e. .BR \-\-scan ", " \-\-shell\-mr ", " \-\-bytes ", " \-\-stdout ). .RB The " \-\-help " and " \-\-version " options should not be used together with this one, since they too will short-circuit and exit before the action of this option is performed. Any unknown or illegal options passed after this one on the command line will cause \fBseedd\fP to exit with a failure (non-zero) return code and without emitting the usual usage help text or any otherwise resulting configuration options. This allows its use to be safely scripted when the input and output cannot or will not be immediately examined for proper sanity. .TP .B \-?, \-\-help Show a shorter version of all of this, which may fit on a single page, FSVO of page size. .SS Pool options These options may be used to control the behaviour of the entropy collection pool. You normally shouldn't need to change or set any of these options unless you have very special requirements and know exactly what you are doing and why. .TP .BI "\-P, \-\-pool\-size=" n Specify the size of the internal entropy pool. Entropy read from a BitBabbler will gather in that pool after health and sanity checking. When multiple BitBabbler devices are in use, entropy from each group of devices will be mixed into it. Entropy read from \fIstdout\fP, or the UDP socket, or delivered to the kernel will be drawn from this pool. Fresh entropy will continue to be mixed into it while it is not being drained faster than it can be filled. 
The default pool size is 64kB, which provides a reasonable balance between what a single BitBabbler running at 1Mbps can fill completely about twice per second, and what most reasonable consumers might ever want to draw from it 'instantly'. There probably aren't many good reasons to make it much larger, but making it smaller will increase the number of input bits mixed into each output bit if the pool is not being drained completely faster than it can fill. We do not rely on this mixing to obtain good quality entropy from each BitBabbler device but it doesn't hurt to be mixing more good entropy into it while the demand is exceeded by supply. .TP .BI " \-\-kernel\-device=" path Set the device node used to feed fresh entropy to the OS kernel. You normally shouldn't ever need to set this explicitly, as the default should be correct for the platform we are running on. This option has no effect unless the \fB\-\-kernel\fP option is being used. .TP .BI " \-\-kernel\-refill=" sec Set the maximum time in seconds before fresh entropy will be added to the OS kernel pool, even when it has not been drained below its usual refill threshold. This option has no effect unless the \fB\-\-kernel\fP option is being used. When feeding the OS pool, \fBseedd\fP will be woken to immediately add more entropy to it any time that it falls below the configured minimum watermark (which on Linux is set by \fI/proc/sys/kernel/random/write_wakeup_threshold\fP and can be configured persistently in \fI/etc/sysctl.d/bit\-babbler\-sysctl.conf\fP). In addition to that, it will also wake up periodically to mix fresh entropy into the OS pool even if it is not being consumed (testing that the output of the device is still passing all the QA testing in the process). This option configures how long it will wait since the last time fresh entropy was added before doing that. If set to 0, then we will never add more entropy unless explicitly woken by the OS pool falling below its watermark. The default is 60 seconds, and there probably aren't many reasons to reduce that, but you may want to increase or disable it on low power systems which you don't want to be waking up just to do this. The main downside to increasing it is that on relatively quiet systems it may take (significantly) longer for the long term QA tests (in particular the 16 bit tests) to accumulate enough results for analysis, and you lose some of the confidence that comes with a higher rate of continual sampling from the device. This option lets you choose the right balance for your own use. If unsure, leaving it at its default setting is probably the right answer. .TP .BI "\-G, \-\-group\-size=" group_number : size Set the size of a single pool group. When multiple BitBabbler devices are available, there is a choice of whether to optimise for throughput or for redundancy. For example a pair of devices both running at 1Mbps can together produce an effective throughput of 2Mbps of entropy if their streams are output independently of each other, but they can also be mixed together in parallel to provide a stronger guarantee of entropy at 1Mbps with the stream being at least as unpredictable as the most unpredictable device. With more than two devices a combination of both strategies may be used. Devices that are placed in the same group will not add entropy to the pool until every device in that group has contributed at least \fIsize\fP bytes to it. 
If the devices are not running at the same bit rate, the faster device(s) will continue to mix entropy into the group until every device has contributed. This option enables configuration of that block size. The \fIgroup_number\fP is an arbitrary integer identifier (which will be passed to the \fB\-\-group\fP option for the device(s) to add to it). The \fIsize\fP may be followed by a suffix of 'k', 'M', or 'G' to multiply it by the respective power of two. The group size will be rounded up to the nearest power of two. Default is for groups to be the same size as the pool, but they may be set either smaller or larger than it if desired. The two values are separated by a colon with no other space between them. .SS Per device options The following options may be used multiple times to individually configure each device when more than one BitBabbler is available. If passed before any \fB\-\-device\-id\fP option, then they set new default values which will apply to every device. If passed after one of those options they will only be applied to the immediately preceding device. .TP .BI "\-r, \-\-bitrate=" Hz Select the device bitrate in bits per second. The available bitrates are determined by an integer clock divider, so not every rate is exactly achievable. An unsupported rate will be rounded up to the next higher rate. For convenience the rate may be followed by an SI multiplier (eg. 2.5M for 2500000). .TP .BI " \-\-latency=" ms Override the calculated value for the USB latency timer. This controls the maximum amount of time that the device will wait if there is any data in its internal buffer (but less than a full packet), before sending it to the host. If this timer expires before a packet can be filled, then a short packet will be sent to the host. The default value is chosen to ensure that we do not send more short packets than necessary for the selected bitrate, since that will increase the number of packets sent and the amount of CPU time which must be spent processing them, to transfer the same amount of data. Unless you are experimenting with changes to the low level code, there is probably no reason to ever use this option to override the latency manually. .TP .BI "\-f, \-\-fold=" n Set the number of times to fold the BitBabbler output before adding it to the pool. Each fold will take the first half of the block that was read and XOR it with the bits in the second half. This will halve the throughput, but concentrate the available entropy more densely into the bits that remain. There are two main things this is expected to do based on the BitBabbler design. It will better mix the low-frequency noise that is captured with that of the higher frequencies, allowing it to sample at higher bitrates without narrowing the noise bandwidth available to influence adjacent bits. It will help to break up any transient local correlations that might occur in the physical processes from which ambient environmental noise is collected. Folding should never reduce the real entropy of each sample, but when all is working exactly as it should, it may not do anything to increase it either. Mathematically, an XOR summation is expected to exponentially smooth any bias in a stream of independent bits, with the result having at least as much entropy as the least predictable of either of the two inputs (in the same way that a one time pad is no less secure despite the plaintext having much less entropy than the pad does). .TP .BI "\-g, \-\-group=" n The entropy pooling group to add this device to. 
See the \fB\-\-group\-size\fP option for a discussion of pool groups. You do not need to declare or define a group in any way before using this option; devices that have the same group number specified will simply be grouped together. By default, all devices are placed in group 0 if this is not set explicitly for them. The group 0 is special in that its size can be set explicitly, but it does not wait for all devices in it to have contributed entropy before mixing into the common pool, which is functionally equivalent to all of those devices being placed into separate groups that are the same size. Normally if a single device in a group fails QA testing, then the entire group will stop contributing to the pool until it is removed or further extended testing confirms that failure to be an anomaly and not a persistent condition. For group 0 (and devices in other separate groups), a failed device will not prevent the remaining devices from continuing to contribute entropy if their own output is still passing the QA testing. .TP .BI " \-\-enable\-mask=" mask Select a subset of the generators on BitBabbler devices with multiple entropy sources. The argument is a bitmask packed from the LSB, with each bit position controlling an individual source, enabling it when set to 1. .TP .BI " \-\-idle\-sleep=" initial : max This option permits tuning how the devices back off from generating entropy at the maximum rate, when it is not being consumed from the output pool. When the output pool is not full, entropy will be read from the devices as quickly as possible to try to refill it. Once it is full, they will begin to be throttled according to the following algorithm: The \fIinitial\fP value is the number of milliseconds to sleep when the output pool first becomes full again. If this value is 0, then the device will immediately remain idle until the output pool is no longer full. Otherwise, reading from the device will pause for either this number of milliseconds, or until the pool is no longer full, whichever comes first. If that timeout expires and the pool is still full, another block of entropy will be generated and mixed into the pool, then the timeout will be doubled. This process will continue until the timeout reaches the \fImax\fP value (which is also in milliseconds), at which point it will not increase any further. The device will always be woken immediately any time the output pool is not full, and the timeout cycle will begin again from the \fIinitial\fP value each time that occurs. As a special case, if the \fImax\fP value is set to 0, with an \fIinitial\fP value that is not zero, the exponential back off will occur as above until the timeout reaches or exceeds 512 ms, at which point further activity will again be suspended indefinitely until the output pool is no longer full. This allows for a mode of operation where the device will still go into a hard suspend when no entropy is being consumed from the output pool, but only after mixing several blocks of entropy from each device that is configured this way into it. The default configuration used if this is not set explicitly is \fIinitial\fP=100 and \fImax\fP=60000. Usually the only reason to change this is if you are trying to minimise the power usage on a low power system which you don't want continually waking up to generate entropy that nothing is using.
For that use, if you are feeding the OS kernel pool, you will probably also want to set the \fB\-\-kernel\-refill\fP option to a suitable value, since it will cause the devices to wake up independently of what is set here (by reading from the output pool, making it be no longer full). Dialling the verbosity up to level 6 (with \fB\-vvvvvv\fP) while tweaking this will let you watch how the reads from the devices are actually throttled. When setting this, either of \fIinitial\fP or \fImax\fP may be omitted (in which case they will retain their default value), but the ':' must always be included. It probably doesn't make a lot of sense to set this differently for each device (especially not for devices which are grouped together), but that is permitted if you really have some reason to want to do that. .TP .BI " \-\-suspend\-after=" ms Set the minimum expected device idle time for which we should allow the device to be suspended. On Linux, USB devices that are idle can automatically be suspended into a low power state, but in order to qualify as being 'idle' for that purpose, we need to release our claim on the device. Full details of the OS part of that can be found here: .nh https://www.kernel.org/doc/Documentation/usb/power\-management.txt .hy The default is 0, which means \fBseedd\fP will never release a device it has claimed. The benefit of this is that no other process can claim it while it is released (accidentally or otherwise), which would prevent us from being able to use it again when we do require entropy from it. It also ensures there is minimal latency when we are woken up to read entropy from it again. Setting this to a value greater than zero means that when the output pool is full, and we are expecting to sleep for at least that amount of time before reading from the device again, then the claim on the device will be released, and the OS will be able to suspend it until we need it again. If the pool is drained and requires more entropy before that time, then we will still reclaim the device immediately and begin reading from it again, but there will be a small amount of additional latency while it wakes up and is reinitialised for use. This option should usually be set in conjunction with \fB\-\-idle\-sleep\fP and \fB\-\-kernel\-refill\fP which control how often the device will be woken again to refresh the entropy pools when it might otherwise have remained idle. If they never allow it to sleep for longer than this time, then this option will have no effect. It probably doesn't make much sense to set this below about 10000 (10 seconds) otherwise the overhead of releasing, reclaiming, and reinitialising the device might actually use more power than suspending it saves. And it definitely doesn't make much sense to set it to a value less than what is configured for the \fIautosuspend_delay_ms\fP option in the kernel, since while we will release the device any time that we \fIexpect\fP to sleep for this long (regardless of whether we actually do or not), the kernel will not actually suspend it until the \fIautosuspend_delay_ms\fP time has elapsed \fIafter\fP we have released it. So if it doesn't get to actually suspend it, we would just be chewing extra CPU cycles, and adding extra latency to obtaining entropy when it is needed, for no net gain. 
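For reference (the device path shown is only an illustration, the actual bus\-port name will depend on where the device is plugged in), on Linux the kernel's current autosuspend delay for a particular device can be inspected, and changed, through sysfs with something like:
.nh
.nf
# cat /sys/bus/usb/devices/3\-2/power/autosuspend_delay_ms
# echo 10000 > /sys/bus/usb/devices/3\-2/power/autosuspend_delay_ms
.fi
.hy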
.TP .B " \-\-low\-power" This is a convenience option, which is equivalent to setting: .nh .nf \-\-kernel\-refill=3600 \-\-idle\-sleep=100:0 \-\-suspend\-after=10000 .fi .hy And which in turn means: We will wake up to mix more entropy into the kernel pool at least once an hour (though it is likely that most systems will already drain it below its threshold and so wake us to refill it before that time expires anyway). We will mix at least 6 blocks of fresh entropy into the \fBseedd\fP output pool each time we are woken, before suspending indefinitely again (until either we are woken by the kernel needing entropy or by the timeout above expiring, or until something else consumes entropy from the output pool - such as from the UDP socket if that is enabled). This is based on doubling the \fIinitial\fP \fB\-\-idle\-sleep\fP timeout each time the output pool remains full, until we exceed the minimum amount of time that really will perform a sleep (512ms), and then sleeping until explicitly woken again after that. We will release the device, giving the OS the opportunity to suspend it, each time it does become fully idle (since an indefinite sleep is considered to be longer than any fixed amount of time). Any or all of those options may still be customised by passing them explicitly \fIafter\fP this option on the command line (in the same way that passing them twice would also override the first instance). This isn't necessarily the configuration offering the lowest possible power consumption, but it's intended to strike a reasonable balance for systems where keeping idle power consumption low is a more important concern than continually mixing in additional fresh entropy or minimising the latency if demand for entropy suddenly surges (which is what the normal defaults are more oriented toward). At the very least it should be a reasonable starting point to begin experimenting from on low power systems. Note that although this option may be specified per-device, the \fB\-\-kernel\-refill\fP time is a global option, and that setting will be applied if the configuration for any device uses this option, even if that device isn't currently available for use. That normally shouldn't be a problem though, since the question of whether minimising power use is more important than other considerations is usually a system-wide one anyway. .TP .B " \-\-limit\-max\-xfer" Limit the maximum transfer chunk size to 16kB. On Linux, prior to kernel 3.3, individual bulk transfer requests were somewhat arbitrarily limited to 16kB. With kernels later than that, larger transfers were supported, and we will make use of those to optimise transfer speeds for large requests at high bitrates when a new enough kernel and libusb with support for this are both available. Unfortunately, the USB chipsets on some motherboards are still buggy when these larger transfers are used, and there is no easy way for us to automatically detect those, since the symptoms they exhibit do vary and aren't always directly visible to our code. Ideally any problems like that should be reported to the kernel maintainers, where they can be fixed, or worked around for device specific quirks, but this option allows you to explicitly request that the transfer size be limited on machines where you are experiencing problems with that. If in doubt, it is safe to use this option on any system, the impact on transfer speed is relatively minimal unless you are trying to obtain huge numbers of bits as quickly as possible. 
But it's not the default, since at present only a very small number of systems are still known to be affected, and that number should continue to decrease over time. .TP .B " \-\-no\-qa" Disable gating entropy output on the result of quality and health checking. You pretty much never want to use this unless you are generating streams to \fIstdout\fP for no other reason than to analyse their quality with some other tool, such as \fBdieharder\fP(1) or the NIST test suite or similar. For that type of use we definitely don't want to be filtering out blocks which have already failed our own internal quality analysis, otherwise the value of such testing will be almost as tainted as that of the people who say "after whitening our RNG with SHA-1 it now passes all of the statistical tests perfectly!", and there's already more than enough fossils in that tarpit. It is not possible to disable this for data which is passed directly to the kernel entropy pool, there is absolutely no reason to ever want to do that, nor does it disable the gating for bits requested from the UDP socket interface. It does not actually disable the QA checks from being performed (so the results of them will still be seen in the monitoring output and can generate external alerts if this mode was entered 'by accident'). It just permits any failing blocks to still pass through to \fIstdout\fP, so other tools can heap all the scorn on the output that it deserves if it is failing. .SS Extended QA options Since we already have some high quality QA analysis running on the output of the BitBabbler devices, it makes sense to also be able to use that to sample, analyse, and monitor the quality of entropy from other independent sources and downstream points that end user applications may be obtaining it from too. .TP .BI " \-\-watch=" path : delay : block_size : bytes Monitor an external device. This option does not directly affect the operation of collecting entropy from BitBabbler devices, or contribute in any way to the entropy that is output, either to \fIstdout\fP or the kernel. What it does do is leverage the quality assurance and health checking algorithms, and the trend monitoring functionality that this software provides, to also permit continuous supervision of other sources which are expected to be statistically random. For example it can be used to regularly sample from \fI/dev/urandom\fP or even from \fI/dev/random\fP to ensure the quality of their output is really what you expect it to be. There's little point to putting the most awesome entropy that the universe can conjure in, if what's coming out and feeding the applications that are consuming it is totally predictable garbage. If this is used to monitor a limited source of blocking entropy, such as \fI/dev/random\fP, then you'll want to be judicious in selecting the rate of reading from it, so as not to consume all the available entropy that you were aiming to gain by feeding it from a BitBabbler in the first place. If it's reading from an 'unlimited' source backed by a PRNG, such as \fI/dev/urandom\fP, then the only real consideration is how much of the other system resources do you want to consume in drinking from the firehose. The \fIpath\fP is the filesystem path to read from; it can be anything which can be opened and read from like a normal unix file. The \fIdelay\fP is the amount of time, in milliseconds, to wait between reading blocks of data from it.
The \fIblock_size\fP is the number of bytes to read in a single block each time the watch process wakes up to read more. The total amount of data to read can be limited to \fIbytes\fP; once that limit is reached, the watch process for \fIpath\fP will end (but all other processing will continue as per normal). All qualifiers except the \fIpath\fP are optional, and separated by colons with no other space between them, but all options must be explicitly set up to the last one that is provided. The \fIdelay\fP may be followed by a suffix of \&'k', 'M', or 'G' to multiply it by the respective power of 10, or by 'ki', \&'Mi', or 'Gi' for powers of two if you're into that kind of thing. The \fIblock_size\fP and \fIbytes\fP options may be similarly suffixed, but like all good sizes on computers are always a power of two if so. .SH CONFIGURATION FILE FORMAT In addition to use of the command line options, \fBseedd\fP configuration may be supplied by "INI" format files encoded as \fBSections\fP, \fBOptions\fP and \fBValues\fP. Since there is no standard definition for that format, the general rules applicable here are as follows: A \fBSection\fP definition begins on a line where its name is enclosed in square brackets. The Section name itself may contain any characters except square brackets. Any characters following the closing square bracket on the same line will simply be ignored, but a well formed file should not rely on that always remaining true. All following \fBOption\fP/\fBValue\fP pairs belong to that Section until the next Section header occurs. Option names may include any characters except whitespace. Leading and trailing whitespace around Option names and Values is ignored. Internal whitespace in Option values is preserved. Options must be defined on a single line, and everything (except leading and trailing whitespace) following the Option name up to the end of the line is part of the Value. Quote characters (of any sort) have no special meaning and will be included as a literal part of the value. Comments must appear on their own line, with the first (non-whitespace) character of the line being '#'. If Options are duplicated, either in a single file or when multiple configuration files are used, then any options which are repeated will override the values which were set for them previously. Options specified on the command line have a similar precedence, with the exception of \fB\-\-verbose\fP, they must come after all \fB\-\-config\fP files if they are to override them and not be overridden by them. The following Sections and Options may be used. See the equivalent command line options (as indicated) for a full description of their behaviour. In most cases, the option names are the same as the long form of the command line option. It is an error for an unknown Section or Option to be present. .SS [Service] section The \fBService\fP section is used to configure process-wide behaviour using the following options: .TP 4 .B daemon .br Fork to the background and run as a daemon process (\fB\-\-daemon\fP). .TP .B kernel .br Feed entropy to the OS kernel (\fB\-\-kernel\fP). .TP .B ip\-freebind .br Allow binding to an IP address that is non-local or does not (yet) exist (\fB\-\-ip\-freebind\fP). .TP .BI udp\-out " host" : port Provide a UDP socket for entropy output (\fB\-\-udp\-out\fP). .TP .BI control\-socket " path" Where to create the service control socket (\fB\-\-control\-socket\fP).
.TP .BI socket\-group " group" Give users in this system group permission to access the control socket (\fB\-\-socket\-group\fP). .TP .BI verbose " level" Set the logging verbosity level. A \fIlevel\fP of 2 is equivalent to using \fB\-vv\fP (\fB\-\-verbose\fP). .SS [Pool] section The \fBPool\fP section is used to configure the entropy collection pool. You normally shouldn't need to change or set any of these options unless you have very special requirements and know exactly what you are doing and why. .TP 4 .BI size " n" The size of the internal entropy pool in bytes (\fB\-\-pool\-size\fP). .TP .BI kernel\-device " path" The device node used to feed fresh entropy to the OS kernel (\fB\-\-kernel\-device\fP). This option has no effect unless the \fB\-\-kernel\fP option is being used. .TP .BI kernel\-refill " sec" The maximum time in seconds before fresh entropy will be added to the OS kernel, even when it hasn't drained below its usual refill threshold (\fB\-\-kernel\-refill\fP). This option has no effect unless the \fB\-\-kernel\fP option is being used. .SS [PoolGroup:\fIn\fP] sections Defines an entropy collecting group and the size of its pool (\fB\-\-group\-size\fP). The group number \fIn\fP is an integer value used by the per-device \fB\-\-group\fP option to assign a device to that group. Any number of groups may be defined, but each must have a unique value of \fIn\fP. .TP 4 .BI size " n" The size of the group pool (\fB\-\-group\-size\fP). .SS [Devices] section The \fBDevices\fP section configures the defaults to use for all BitBabbler devices which don't override them in a per-device section (or on the command line). All options set here do the same thing as the command line options with the same name when passed before any \fB\-\-device\-id option. .TP 4 .BI bitrate " Hz" The rate in bits per second at which to clock raw bits out of the device (\fB\-\-bitrate\fP). .TP .BI latency " ms" Override the calculated value for the USB latency timer (\fB\-\-latency\fP). .TP .BI fold " n" Set the number of times to fold the BitBabbler output before adding it to the pool (\fB\-\-fold\fP). .TP .BI group " n" The entropy \fB[PoolGroup:\fP\fIn\fP\fB]\fP to add the device to (\fB\-\-group\fP). .TP .BI enable\-mask " mask" Select a subset of the generators on BitBabbler devices with multiple entropy sources. The argument is an integer bitmask packed from the LSB, with each bit position controlling an individual source, enabling it when set to 1 (\fB\-\-enable\-mask\fP). .TP .BI idle\-sleep " initial" : max Configure how devices back off from generating entropy at the maximum rate when it is not actually being consumed by anything (\fB\-\-idle\-sleep\fP). .TP .BI suspend\-after " ms" The threshold in milliseconds where if we expect the device to be idle for longer than that, we will release our claim on it, allowing the OS to put it into a low power mode while it is not being used (\fB\-\-suspend\-after\fP). .TP .B low\-power Enable options for better power saving on lightly loaded systems (\fB\-\-low\-power\fP). .TP .B limit\-max\-xfer Limit the maximum transfer chunk size to 16kB (\fB\-\-limit\-max\-xfer\fP). .TP .B no\-qa Disable gating entropy output on the result of quality and health checking (\fB\-\-no\-qa\fP). .SS [Device:\fIid\fP] sections Sections with a \fBDevice:\fP prefix can be used to both enable and configure individual devices. It is the equivalent of passing .BI \-\-device\-id= id on the command line. 
If any options are specified for this section, that is equivalent to passing them after the \fB\-\-device\-id\fP option in that they will only apply to this device and no others. All the options for the \fB[Devices]\fP section above may be used here. If no \fB[Device:]\fP sections are defined, the default is to operate on all of the devices which are available. It is not an error to define these sections for devices which are not, or may not be, present at any given time. .SS [Watch:\fIid\fP] sections Sections of this type can be used to run our QA testing on some other external source of random bits, provided we can read them as if they were a file or named pipe or device node. Any number of \fB[Watch:]\fP sections may be defined, each just needs its own unique label after the \fBWatch:\fP prefix to identify it. The \fIid\fP has no use or meaning other than to make each watch section name unique. The options below correspond to the component parts of the argument passed with \fB\-\-watch\fP on the command line. .TP 4 .BI path " device" The path to the device/pipe/file to read bits from. This option must be set for every watch section. .TP .BI delay " ms" How long to wait before reading the next block of bits, in milliseconds. Default is 0. .TP .BI block\-size " bytes" The number of bytes to read between each delay period. Default is 64k. .TP .BI max\-bytes " bytes" The maximum number of bytes to read in total. Default is 0 which implies reading an 'infinite' number of bits for as long as the process keeps running, which is probably what you usually want when using this. .SH CONTINUOUS MONITORING The query and control socket enables device performance and QA statistics to be examined in real-time. The \fBbbctl\fP(1) tool can be used to produce human readable reports on demand from the information it provides, but it can also be queried directly by other tools that want that information in a more machine readable form (see the \fBjson_protocol\fP document for a full description of that). For users of \fBmunin\fP, a plugin is provided which will continuously graph the status of each device, and which can be used to trigger an alert if an abnormal condition occurs. The \fBmunin\fP plugin requires the perl \fBJSON::XS\fP module (provided by the \fBlibjson\-xs\-perl\fP package on Debian systems), and it must be explicitly enabled on each system where it is desired to run. Typically that will require doing something like this: .nh .nf # munin\-node\-configure \-\-shell # ln \-s /usr/share/munin/plugins/bit_babbler /etc/munin/plugins/bit_babbler # service munin\-node restart .fi .hy If \fBmunin\-node\-configure\fP does not report that plugin autoconfiguration succeeded, the most likely reason is that \fBJSON::XS\fP is not available. There are a few options to configure the plugin's behaviour, these are all documented in .nh \fI/etc/munin/plugin\-conf.d/bit\-babbler\fP .hy (where they should be set if desired). The \fBmunin\-node\fP service needs to be restarted for changes to its plugins to take effect. .SH BOOT SEQUENCING When \fBseedd\fP is being used to feed entropy to the OS kernel, there are two main considerations to deal with. On modern systems where the kernel random source is used by almost every process, if only for ASLR, we want that to be well seeded with the best entropy available as early as is possible. 
And ideally we also want any services which do have a critical need for high quality entropy to not be started until that can be guaranteed by proper QA testing of the entropy stream. Historically, the mechanics of ensuring all that was not just an OS-specific detail, but it also varied, sometimes greatly, even between different flavours of the various OS platforms. So it was up to the higher level packaging for each OS variant, and the users of them, to implement something appropriate for each environment. That is still largely true, but for people using Linux distributions where the \fBinit\fP(1) process is provided by systemd now, we can in theory provide some defaults which should suit most users and still be fairly easily customisable for specific use case requirements as well. Since configuring systemd correctly for anything not completely trivial is still something of a black art, which a lot of services (and even the systemd provided ones) seem to still get wrong to some degree or another, it does make sense for us to provide a tested configuration - along with some guidance to users of other platforms and init systems about what they should be aiming for. What follows is a description of the boot sequencing options implemented for a systemd environment, but the general requirements of that should be broadly applicable too since they don't do anything magical which couldn't be done in another alternative environment. By default we provide two systemd "service units" to implement the requirements outlined above. .SS The seedd daemon service (\fIseedd.service\fP) This unit provides the ordinary functionality of ensuring that \fBseedd\fP is started at the earliest possible time in the boot sequence where its requirements are able to be met. For that reason its requirements should be (and deliberately have been) kept as minimal as reasonably possible. It needs access to some low-level system libraries, to its \fIseedd.conf\fP configuration file (though that could be eliminated at the cost of some user-friendliness by hardcoding the options to run it with in the unit itself), to a writable directory where it will create its control-socket, and to the system device files where the BitBabbler and kernel random devices will be found. Since the BitBabbler devices can be hotplugged, we don't actually need to wait for them to be present to start this - and in practice with the current unit configuration, \fBseedd\fP is almost certain to be started before the USB devices have been probed and announced to the system, or before even \fBudev\fP is running to notify it about them. This means it will be ready to use them at the soonest possible moment that they do become available. This unit is installed by default, but it must still be explicitly enabled, either by the distro packaging (which we do recommend does this, and which the Debian packages indeed do), or by the local admin if they manually installed this software from source themselves. It is the equivalent of what the SysV init script provided for Debian based systems will do on systems which aren't using systemd as their init system. It is always safe for \fBseedd\fP to be running even when no BitBabbler devices are currently available in the system, it just won't do much unless also configured to watch some external source. 
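If you do need to enable it by hand (for example after installing from source), that is normally just a matter of something like:
.nh
.nf
# systemctl enable seedd.service
# systemctl start seedd.service
.fi
.hy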
.SS Waiting for initial kernel seeding (\fIseedd\-wait.service\fP) This optional unit is also installed by default, but it generally should \fBnot\fP be enabled automatically by distro packaging, only at the explicit request of a local admin. It provides a boot sequence point with some more complete and useful guarantees: .HP 3 \ - That \fBseedd\fP has successfully been started and is running. .HP 3 \ - That at least one BitBabbler device (or more depending on the configuration used for \fBseedd\fP) is available and operating correctly, and able to provide the system with fresh QA checked entropy. .HP 3 \ - That good quality entropy obtained from the available device(s) has been provided as initial seed material to the OS kernel. .PP If simply enabled on its own, this unit will delay starting anything which is scheduled to be started later than \fBseedd\fP in the boot sequence (or more specifically, anything which wouldn't be started until after all of the local mount points in \fI/etc/fstab\fP have been mounted - which should be before most services that aren't part of the early boot initialisation), until the three conditions above have been met. It will wait for up to 30 seconds for that to occur before timing out and entering a failed state, after which the rest of the boot sequence will then still continue normally. This provides a reasonable compromise between a guarantee that good entropy will actually be used if it is possible to obtain it, and not rendering the system completely unable to boot if for some reason it is not. If you wish to enable it, you can do that with: # systemctl enable seedd\-wait.service If you wish to change the timeout period, you will need to edit or override this unit to change either or both of the timeout used in \fB\-\-waitfor\fP and the \fBTimeoutStartSec\fP option. It should be long enough for devices to become available, and have enough entropy read from them to be QA checked for use as early seed material, but not so long that booting is delayed needlessly when it is clear that nothing is likely to change if we just wait longer. .SS When failure is not an option If a stronger guarantee than the above really is needed, either system-wide or just for particular services, then declaring a \fBRequires\fP relationship with this unit will prevent anything which does so from starting, both before this task has completed and if this task should fail. For example if you wanted to prevent \fBapache2\fP(8) from starting if this unit's checks should fail, then you could do: # systemctl add\-requires apache2.service seedd\-wait Or equivalently (which is what the above command does): .nh .nf # mkdir /etc/systemd/system/apache2.service.requires # ln \-s /lib/systemd/system/seedd\-wait.service /etc/systemd/system/apache2.service.requires .fi .hy Which will work for older systems where \fBsystemctl\fP does not yet support the \fBadd\-requires\fP command, and with generated units (such as those for services which still provide only a SysV init script), which at the time of writing \fBsystemctl\fP failed to support. Any number of other units may have a \fBRequires\fP dependency retrofitted like this, or may even include it in their own unit file if appropriate. 
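Whichever of those methods is used, one way to confirm that systemd has actually registered the extra dependency (running \fBsystemctl daemon\-reload\fP first if the symlink was created by hand) is to query the unit's requirement list, for example:
.nh
.nf
# systemctl show \-p Requires apache2.service
.fi
.hy
which should now include \fIseedd\-wait.service\fP in its output.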
.SS Go big or go visit the server room If you want the strongest guarantee for all normal services running on the system, so that none of them will be started if this initial boot test fails, then you can do something like the following, which if it fails will put the system into a minimal single-user mode with only an emergency admin shell available to someone with console access who knows the root password: .nh .nf # mkdir /etc/systemd/system/seedd\-wait.service.d/ # cat > /etc/systemd/system/seedd\-wait.service.d/failure.conf <. You can send bug reports, feature requests, praise and complaints to support@bitbabbler.org. bit-babbler-0.9/doc/virtual_machines0000644000000000000000000004000214136173163014443 0ustar Using BitBabbler devices in a virtual machine ============================================= One of the early motivations behind this project was the need for a good entropy source inside virtual machines. There are several ways to do this, including using the virtio-rng device supported by QEMU and/or EGD to import entropy from the host to the guest, but our initial experience with that was somewhat underwhelming. Hopefully it will have improved and be more stable and usable by the time people from the future read this -- but for now this document is just going to focus on what is needed to use a BitBabbler device directly inside a VM. In theory, that should be a simple task. Just use USB-passthrough to grant the guest system access to the desired device(s), and then just use it in the same way that you normally would. But I wouldn't be writing this if things were really that easy ... The examples here are centered around a KVM/QEMU system managed by libvirt, but the general principles of what is needed should extrapolate fairly well to other virtualisation systems too. The best and most complete solution that we have at present requires erecting some scaffolding around libvirt to provide the support for USB hotplug which it is currently missing and help us plaster over some of the other cracks that needed functionality would otherwise fall into and become lost. There are more details on all that in the bbvirt(1) manual page, what follows here is the quick-start guide for assembling it. Install the udev rules. ----------------------- If you installed the bit-babbler Debian package, this is already done. If you didn't, you'll want to copy the file debian/bit-babbler.udev to somewhere like /etc/udev/rules.d/60-bit-babbler.rules. Ensure that the two rules which RUN bbvirt in it point to the actual location of that script on your system. By default it is expected in /usr/bin, but if you built the bit-babbler source with ./configure && make and didn't change the prefix it may be in /usr/local/bin instead. Configure the device assignments that you want. ----------------------------------------------- The /etc/bit-babbler/vm.conf file is where you define the defaults you want to use for which device will be assigned to which virtual machine. If you are also running seedd(1) on the host machine you will need to explicitly define which devices will be used on the host too, which is set in /etc/default/seedd if you're using the Debian package init script (or something derived from it) to start that daemon. Install the libvirt hook. ------------------------- The example file for that can be found in libvirt/qemu-hook of the bit-babbler source package, or /usr/share/doc/bit-babbler/examples/qemu-hook if the Debian binary package is installed. 
It either needs to be copied to be the libvirt 'qemu' hook (typically /etc/libvirt/hooks/qemu) or merged with whatever is already there if you do already have a qemu hook installed, since there can be only one such hook with current libvirt (which is why we can't safely install this automatically at present). If there was no qemu hook installed previously, you will need to restart the libvirtd process once this is done (but that won't disturb any of your already running guests). Share and enjoy! ---------------- If that all went as planned, the BitBabbler devices you assigned to the guest domains should now be hotplugged into them whenever the devices are inserted or the guest machine is started. If the devices were already plugged in, and the guests were already running, you can synthesise a udev event to attach them to the guests immediately with: # udevadm trigger -c change -s usb -a "idVendor=0403" -a "idProduct=7840" Guest domain configuration. --------------------------- With the above all in place, there is no need to make any change to the guest domain definitions for the devices you want attached to them. They will be dynamically added to them using the logical address reported by udev. There is one change still worth making to them if you haven't already though. To get the full throughput from the BitBabbler devices they will need to be attached to a USB2.0 capable port in the guest. However the default libvirt configuration generally only creates a USB1.1 host controller, which will be a significant bottleneck for getting bits out of them. The easiest way to ensure that is to simply change the USB to be an XHCI device, since it will automatically support both USB3.0 devices and all lower speed devices without the need to individually configure a full set of companion controllers for lower speed devices. It's also supposed to be a more efficient driver. It's a bit more bleeding edge than the others, and may have some issues to shake out (the Debian kFreeBSD kernel did not support XHCI when we were testing the BitBabbler support for it, and bugs are still being fixed in the Linux kernel for it) but mostly it's been working quite well for us. To do that, you'll want to replace the USB section with something like this:
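(The XML snippet itself was lost from this copy of the document; a representative XHCI controller definition of the kind being described would be the following, with the slot value only a placeholder.)

    <controller type='usb' index='0' model='nec-xhci'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
    </controller>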
Where 'slot' is any free PCI slot not already in use by any other controller in the guest. =============================================================================== You can fairly safely ignore everything below here now, as the above is both all you should need and our best recommendation so far. What follows remains mostly as a historical note of things we tried that fell short of being an entirely satisfactory solution in one or (usually) more ways. The easy case ------------- If you only have one BitBabbler device in your host machine, and it is only hosting one guest VM, and you want to use it in the guest and not the host, then things actually are pretty simple. Assuming your VM already has USB support enabled (which libvirt will do by default), you just need to add a configuration like this to your domain (inside the section): You can either edit the domain config directly to add it, in which case the device will be available the next time the VM is started, or you can add it to an already running VM by saving the above to a file (say bb.xml) and then running: # virsh attach-device yourdomain ./bb.xml --live At which point the device will appear in the running VM as a normal hotplug event, the same way it appears on a running host when first plugged in. This simplicity ends somewhat abruptly though if you have more than one BitBabbler in the host machine, because they will all share the same vendor and product ID making the configuration above ambiguous ... The realistic case ------------------ With hardware becoming ever more powerful, running a single guest VM on the host is more likely to be the exception than the rule. The general case that we need to be able to support is a host running any number of guest VMs, with any number of BitBabbler devices available, with full control of how many (and which) of them are to be allocated to each guest, or to the host itself. Since each BitBabbler device has a unique serial number, this would still be simple if the configuration above was able to use that to distinguish them from each other, but neither QEMU nor libvirt supports using that information at the present time. The only way they give us to distinguish between multiple devices that have the same vendor and product ID is to look at where it is connected to the USB bus. The libvirt configuration allows us to replace the and with an address specification of the form:
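(The element names were stripped from the sentence above; it is describing replacing the <vendor> and <product> elements with an <address> element of roughly this form, where the bus and device values are placeholders.)

    <address bus='bus_number' device='device_number'/>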
We can get the required bus and device number from a device scan like this: # seedd -sv Have 3 devices: Bus:Dev VID:PID 003:006 0403:7840 Serial 'GK9VZF', ... port 3-2 005:002 0403:7840 Serial 'GDYXTH', ... port 5-4 005:007 0403:7840 Serial 'GTH4R8', ... port 5-2.3 So the above
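    <address bus='3' device='6'/>  <!-- reconstructed from the 003:006 entry in the scan above -->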
refers here to the device with serial number GK9VZF. The good news is, this can be used with the same, simple, managed configuration as above. The bad news is, the 'device' number is not a constant, and it will change each time the device is unplugged and replugged, and there is a good probability that it will be different every time the host is rebooted, even if the devices remain plugged in exactly as they were before ... Which means this is fine if you want a quick way to add a specific device to a guest VM on the fly in an ad hoc manner - but it's not going to be a reliable way to set up a static configuration which should survive a host reboot. Right now, the only way to achieve that is to get our hands dirty at a lower level than the normal libvirt interface. QEMU offers us a somewhat more workable, but still less than ideal solution. It allows us to choose the device address by bus number and port, rather than device number. Which means we can select the device by where it is physically plugged into the host machine. That in turn brings its own potential for new problems, but if nobody moves the devices it will be stable through a reboot, and likewise through the device being removed and replaced into the same slot. Essentially we'll be assigning a physical port on the host to the guest, and whatever device is plugged into that slot, will then belong to the guest VM. In order to do this though, we need to step outside the managed niceties of libvirt, which means we need to handle a few more of the things that it does with our own alternative configuration. The first thing we need to do is ensure that the libvirt managed QEMU session will have permission to access the device. The precise details of this will vary based on how libvirt is configured on your system but typically it will run QEMU as an unprivileged user on the host. We need to give that user, or a group that user is in, permission to access the relevant USB device files. We can do that with udev, using a configuration something like the following, placed into a configuration file in /etc/udev/rules.d: ACTION=="add", SUBSYSTEM=="usb", \ ATTR{idVendor}=="0403", ATTR{idProduct}=="7840", ATTR{serial}=="GK9VZF", \ GROUP="libvirt-qemu" Where the serial number and GROUP (or USER) will be set to suit your system. Multiple rules like this can be added for multiple devices, or wildcards used to match multiple devices in a single rule, but each device that is to be assigned to a guest VM must have its permission set this way. Any BitBabbler device that will only be used by the host does not need to be (and probably should not be) included here. This configuration will take effect the next time the device is plugged in, or when the machine is rebooted. The next thing we need to do is configure the libvirt domain to add the device by bus and port number. To do this we need to directly supply options for the QEMU command line. The important parts of that will look something like this: ... ... Which would add all three devices reported by the scan above to this guest VM. The important things to note here are the 'xmlns:qemu' option, which must be included in the tag for the section to be valid, and the hostbus/hostport values, which can be obtained for each device from the scan output (eg. 'port 5-2.3' equates to hostbus=5,hostport=2.3). 
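Since the XML fragment itself was lost from this copy of the document, a rough reconstruction of that override (using the port values from the scan above, and the standard libvirt QEMU namespace URI) would look like:

    <domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
      ...
      <qemu:commandline>
        <qemu:arg value='-device'/>
        <qemu:arg value='usb-host,hostbus=3,hostport=2'/>
        <qemu:arg value='-device'/>
        <qemu:arg value='usb-host,hostbus=5,hostport=4'/>
        <qemu:arg value='-device'/>
        <qemu:arg value='usb-host,hostbus=5,hostport=2.3'/>
      </qemu:commandline>
      ...
    </domain>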
With this configuration in place, the device should become available in the guest VM the next time it is restarted (note that it is not sufficient to just reboot the guest OS, the VM itself must actually be halted and restarted). It is also possible to hotplug a device into a running VM using the QEMU monitor, but since the libvirt managed method works for that too, I won't detail that here. If you've followed what all the above does, it shouldn't be hard to figure out if you really do ever need that. The final configuration change for the VM, which is not strictly required, but is highly recommended, is to ensure the device is attached to a USB2.0 port (or better) inside the guest. The default libvirt configuration generally only creates a USB1.1 host controller, and if the BitBabbler device is attached to that then its throughput will be well below what it is really capable of. The easiest way to ensure that is to simply change the default USB to be an XHCI device, since it will automatically support both USB3.0 devices and all lower speed devices without the need to individually configure a full set of companion controllers for lower speed devices. It's also supposed to be a more efficient driver. It's a bit more bleeding edge than the others, and may have some issues to shake out, but so far we've not run into them, at least not with the BitBabbler devices. To do that, you'll want to replace the USB section with something like:
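(As above, the XML example was lost from this copy; a representative XHCI controller definition, with a placeholder slot value, would be:)

    <controller type='usb' index='0' model='nec-xhci'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
    </controller>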
Where 'slot' is a free PCI slot not already in use by any other controller. Armed with these workarounds, that should be about all you need for just about any configuration. There are some variations on the themes that are possible, and more options that can be specified to more precisely control how and where devices are connected inside the guest, but we'll defer to the QEMU and libvirt documentation for details of those. Hopefully in the not too distant future, libvirt will grow some extra layers of user-friendly to make much of this be no longer necessary, but until then we need to play with the hand we're dealt. One last thing to note, which isn't directly related to the VM configuration, is that you must ensure nothing on the host claims any of the devices which are to be allocated to guest VMs before the guest itself has been able to claim it (after that, attempts to access it from the host should be refused since the device will already be claimed). Mostly this means that if you are also running seedd on the host, you need to explicitly specify which device(s) it should use with the --device-id option, otherwise it will automatically claim all of them, and it will probably do that before the guest VMs have had time to boot. Dealing with cgroups -------------------- If using the override on a system with cgroups active, then libvirt will not automatically add the USB device to the set of allowed devices like it would when using a section, and since the USB devices are not in the default set, they will need to be added explicitly. To do this we again need a known and stable name for the device, but we can do that by creating a symlink with the udev rule like this: ACTION=="add", SUBSYSTEM=="usb", \ ATTR{idVendor}=="0403", ATTR{idProduct}=="7840", ATTR{serial}=="GK9VZF", \ GROUP="libvirt-qemu", SYMLINK="bitbabbler/$attr{serial}" Which will ensure that /dev/bitbabbler/GK9VZF is a link to the real USB device node when it is plugged in. We can then have it added to the cgroup for the virtual machines by setting the following in /etc/libvirt/qemu.conf: cgroup_device_acl = [ "/dev/null", "/dev/full", "/dev/zero", "/dev/random", "/dev/urandom", "/dev/ptmx", "/dev/kvm", "/dev/kqemu", "/dev/rtc","/dev/hpet", "/dev/vfio/vfio", "/dev/bitbabbler/GK9VZF" ] You can add as many devices there as you require for use in virtual machines. bit-babbler-0.9/include/0002755000000000000000000000000014136173163012047 5ustar bit-babbler-0.9/include/bit-babbler/0002755000000000000000000000000014136173163014214 5ustar bit-babbler-0.9/include/bit-babbler/aligned_recast.h0000644000000000000000000000646514136173163017342 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2015 - 2021, Ron #ifndef _BB_ALIGNED_RECAST_H #define _BB_ALIGNED_RECAST_H #include #include namespace BitB { template< typename T > struct related_type { typedef void* void_type; }; template< typename T > struct related_type< const T* > { typedef const void* void_type; }; template< typename T > struct alignment_of { enum { value = __alignof__(T) }; }; template< typename T > struct alignment_of< T* > { enum { value = __alignof__(T) }; }; // Return true if pointer p is aligned to some multiple of S. template< size_t S > BB_CONST bool IsAligned( const void *p ) { //{{{ if( S & (S - 1) ) { // We shouldn't normally ever be here, the natural alignment of types // on most platforms is always a power of 2, and checking that should // be faster than the modulus here. 
But since this should get compiled // out as dead code if we don't need it, there's no harm in also having // a fully generic implementation just in case we ever really do. if( __builtin_expect(reinterpret_cast(p) % S, 0) ) return false; } else { if( __builtin_expect(reinterpret_cast(p) & (S - 1), 0) ) return false; } return true; } //}}} // Return true if pointer p is aligned to some multiple of the alignment of type T. template< typename T > BB_CONST bool IsAligned( const void *p ) { return IsAligned< alignment_of::value >( p ); } // For some reason GCC 4.9.2 thinks aligned_recast() can be declared const, // but that seems wrong because it can throw and calls stringprintf, and // empirically, if we declare these with the const attribute then the unit // tests fail ... so squelch the warning. EM_TRY_PUSH_DIAGNOSTIC_IGNORE("-Wsuggest-attribute=const") // Safe cast back to a type with increased alignment. // // This will cast pointer p, to type T, after asserting that it is already // suitably aligned to some multiple of S. // // The main use for this is portably squelching -Wcast-align warnings where // it is certain that the actual alignment of the pointer being punned will // always be sufficient, with a runtime check to assert that really is true. template< typename T, size_t S, typename P > T aligned_recast( P p ) { //{{{ if( ! IsAligned( p ) ) throw std::invalid_argument( stringprintf( "aligned_recast: %s %p has alignment < %zu in cast to %s", EM_TYPEOF(P), p, S, EM_TYPEOF(T) ) ); return reinterpret_cast( static_cast< typename related_type
<P>
::void_type >( p ) ); } //}}} // Cast pointer p, to type T, after asserting that it is already suitably // aligned to some multiple of the alignment of type T. template< typename T, typename P > T aligned_recast( P p ) { return aligned_recast< T, alignment_of::value >( p ); } EM_POP_DIAGNOSTIC } #endif // _BB_ALIGNED_RECAST_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/chisq.h0000644000000000000000000001151314136173163015473 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2014 - 2015, Ron // // The implementation of poz() and pochisq() were taken almost verbatim from // the public domain ENT test suite, which in turn also took it with minimal // modification from public domain code by Gary Perlman of the Wang Institute. // The fine tradition of minimal modification is continued here, with changes // mostly to keep modern gcc from barking at us about it and to encapsulate it // a bit more tightly. So as is usual in such cases, the blame for any damage // to Gary and John's careful work entirely stops here. // // But if it works perfectly, all credit rightly belongs to them. // // The original code for ENT can be found at // The changes to poz() and pochisq() made in this file are released into the // public domain as well. #ifndef _BB_CHISQ_H #define _BB_CHISQ_H #include namespace BitB { namespace QA { // Return the cumulative probability from -oo to a normal z value static inline double poz(const double z) { //{{{ // Adapted from a polynomial approximation in: // Ibbetson D, Algorithm 209 // Collected Algorithms of the CACM 1963 p. 616 // // This routine has six digit accuracy, so it is only useful for // absolute z values < 6. For z values >= to 6.0, it returns 0.0. static const double Z_MAX = 6.0; double y, x, w; if (z > 0.0 || z < 0.0) { y = 0.5 * fabs(z); if (y >= (Z_MAX * 0.5)) { x = 1.0; } else if (y < 1.0) { w = y * y; x = ((((((((0.000124818987 * w - 0.001075204047) * w + 0.005198775019) * w - 0.019198292004) * w + 0.059054035642) * w - 0.151968751364) * w + 0.319152932694) * w - 0.531923007300) * w + 0.797884560593) * y * 2.0; } else { y -= 2.0; x = (((((((((((((-0.000045255659 * y + 0.000152529290) * y - 0.000019538132) * y - 0.000676904986) * y + 0.001390604284) * y - 0.000794620820) * y - 0.002034254874) * y + 0.006549791214) * y - 0.010557625006) * y + 0.011630447319) * y - 0.009279453341) * y + 0.005353579108) * y - 0.002141268741) * y + 0.000535310849) * y + 0.999936657524; } } else { x = 0.0; } return z > 0.0 ? ((x + 1.0) * 0.5) : ((1.0 - x) * 0.5); } //}}} // Return the probability of chi-squared value x with df degrees of freedom static inline double pochisq( double x, unsigned df ) { //{{{ // Adapted from: // Hill, I. D. and Pike, M. C. Algorithm 299 // Collected Algorithms for the CACM 1967 p. 243 // // Updated for rounding errors based on remark in ACM TOMS // June 1985, page 185 static const double LOG_SQRT_PI = 0.5723649429247000870717135; /* log(sqrt(pi)) */ static const double I_SQRT_PI = 0.5641895835477562869480795; /* 1 / sqrt(pi) */ static const double BIGX = 20.0; /* max value to represent exp(x) */ struct local { static double ex( double n ) { return n < -BIGX ? 0.0 : exp(n); } }; double a, y, s; double e, c, z; bool even; /* true if df is an even number */ if (x <= 0.0 || df < 1) return 1.0; // gcc (up to 4.9.1) is not smart enough to realise y will never // be accessed if df==1 and complains about it being uninitialized. // It's probably smart enough to optimise this away again though. 
y = 0.0; a = 0.5 * x; even = (2 * (df / 2)) == df; if (df > 1) y = local::ex(-a); s = (even ? y : (2.0 * poz(-sqrt(x)))); if (df > 2) { x = 0.5 * (df - 1); z = (even ? 1.0 : 0.5); if (a > BIGX) { e = (even ? 0.0 : LOG_SQRT_PI); c = log(a); while (z <= x) { e = log(z) + e; s += local::ex(c * z - a - e); z += 1.0; } return s; } e = (even ? 1.0 : (I_SQRT_PI / sqrt(a))); c = 0.0; while (z <= x) { e = e * (a / z); c = c + e; z += 1.0; } return c * y + s; } return s; } //}}} } // QA namespace } // BitB namespace #endif // _BB_CHISQ_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/client-socket.h0000644000000000000000000001715514136173163017140 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2004 - 2018, Ron #ifndef _BB_CLIENT_SOCKET_H #define _BB_CLIENT_SOCKET_H #include #include namespace BitB { class ClientSock : public RefCounted { //{{{ private: #if EM_PLATFORM_MSW WinsockScope m_winsock; #endif std::string m_id; int m_fd; size_t m_maxsize; size_t m_fill; char *m_buf; void ConnectSocket( int domain, int type, int protocol, const struct sockaddr *addr, socklen_t addrlen ) { //{{{ m_fd = socket( domain, type, protocol ); if( m_fd == -1 ) throw SocketError( _("ClientSock( %s ): failed to create socket"), m_id.c_str() ); if( connect( m_fd, addr, addrlen ) == -1 ) throw SocketError( _("ClientSock( %s ): failed to connect socket"), m_id.c_str() ); } //}}} void CreateUnixSocket( const std::string &path ) { //{{{ #if EM_PLATFORM_POSIX sockaddr_any_t addr; if( path.size() >= sizeof(addr.un.sun_path) ) throw Error( _("ClientSock: socket path '%s' is too long. " "Maximum length is %zu bytes."), path.c_str(), sizeof(addr.un.sun_path) - 1 ); addr.un.sun_family = AF_UNIX; path.copy( addr.un.sun_path, sizeof(addr.un.sun_path) - 1 ); addr.un.sun_path[ path.size() ] = '\0'; ConnectSocket( AF_UNIX, SOCK_STREAM, 0, &addr.any, sizeof(addr.un) ); #else (void)path; throw Error("Unix sockets are not supported on this platform"); #endif } //}}} void CreateTCPSocket( const std::string &addr ) { //{{{ SockAddr sa( addr ); sa.GetAddrInfo( SOCK_STREAM, AI_ADDRCONFIG ); if( sa.addr.any.sa_family != AF_INET && sa.addr.any.sa_family != AF_INET6 ) throw Error( _("ClientSock( %s ): not an IPv4 or IPv6 address (family %u)"), m_id.c_str(), sa.addr.any.sa_family ); ConnectSocket( sa.addr.any.sa_family, sa.addr_type, sa.addr_protocol, &sa.addr.any, sa.addr_len ); } //}}} size_t do_read( char *buf, size_t size, Json::Handle &json ) { //{{{ for(;;) { if( m_fill ) { size_t len = strnlen( m_buf, m_fill ); if( len < m_fill ) { // We have a null terminated reply if( buf ) { if( len >= size ) throw Error( _("ClientSock::read( %zu ): buffer too small" " for %zu byte reply"), size, len + 1 ); memcpy( buf, m_buf, len + 1 ); } else json = new Json( m_buf ); if( len == m_fill - 1 ) { // and no bytes from the next reply yet m_fill = 0; return len + 1; } // there is (at least the start of) another reply memmove( m_buf, m_buf + len + 1, m_fill - len - 1 ); m_fill -= len + 1; return len + 1; } else { // We haven't read the whole reply yet if( m_fill == m_maxsize ) { // we have a whole buffer full of data now, // but there was no request terminator seen, // so whatever is in there must be invalid. m_fill = 0; throw Error( _("ClientSock::read( %zu ): max message size exceeded, " "read %zu bytes with no terminator"), size, m_maxsize ); } // else // we haven't filled the whole buffer yet, // so maybe this was just a short read. // Try to read some more. 
} } ssize_t n = recv( m_fd, m_buf + m_fill, m_maxsize - m_fill, 0 ); if( n < 0 ) throw SocketError( _("ClientSock::read( %zu ): failed"), size ); if( n == 0 ) { m_fill = 0; throw Error( _("ClientSock::read( %zu ): EOF"), size ); } Log<4>( "ClientSock::read( %zu ): %zu bytes at %zu\n", size, n, m_fill ); m_fill += size_t(n); } } //}}} void Close() { //{{{ if( m_fd == -1 ) return; #if EM_PLATFORM_MSW closesocket( m_fd ); #else close( m_fd ); #endif } //}}} public: typedef RefPtr< ClientSock > Handle; ClientSock( const std::string &addr, size_t max_msg_size = 64 * 1024 * 1024 ) : m_id( addr ) , m_fd( -1 ) , m_maxsize( max_msg_size ) , m_fill( 0 ) { Log<2>( "+ ClientSock( '%s', %zu )\n", m_id.c_str(), m_maxsize ); if( addr.find("tcp:") == 0 ) CreateTCPSocket( addr.substr(4) ); else CreateUnixSocket( addr ); m_buf = new char[m_maxsize]; } ~ClientSock() { Log<2>( "- ClientSock( '%s', %zu )\n", m_id.c_str(), m_maxsize ); Close(); delete [] m_buf; } // Low-level I/O size_t write( const char *buf, size_t len ) { //{{{ ssize_t n = send( m_fd, buf, len, 0 ); if( n < 0 ) throw SocketError( _("ClientSock::write( %zu ): failed"), len ); return size_t(n); } //}}} size_t read( char *buf, size_t size ) { Json::Handle unused; return do_read( buf, size, unused ); } // High-level I/O void SendRequest( const std::string &req ) { //{{{ size_t n = req.size() + 1; Log<3>( _("ClientSock::SendRequest: '%s'\n"), req.c_str() ); for( size_t c = n; c; ) { ssize_t w = send( m_fd, req.c_str() + n - c, c, 0 ); if( w < 0 ) throw SocketError( _("ClientSock::SendRequest: write failed") ); if( w == 0 ) throw Error( _("ClientSock::SendRequest: write EOF") ); c -= size_t(w); } } //}}} Json::Handle Read() { Json::Handle json; do_read( NULL, 0, json ); return json; } }; //}}} } // BitB namespace #endif // _BB_CLIENT_SOCKET_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/control-socket.h0000644000000000000000000007676514136173163017356 0ustar // This file is distributed as part of the bit-babbler package. 
// Copyright 2004 - 2018, Ron #ifndef _BB_CONTROL_SOCKET_H #define _BB_CONTROL_SOCKET_H #include #include #include #include #if EM_PLATFORM_POSIX #include #include #include #endif namespace BitB { class ControlSock : public RefCounted { //{{{ private: class Connection : public RefCounted { //{{{ private: ControlSock *m_server; int m_fd; pthread_t m_connectionthread; void send_response( const std::string &msg ) { //{{{ size_t n = msg.size() + 1; Log<3>( "ControlSock::Connection( %d )::send_response: %zu bytes\n", m_fd, n ); for( size_t c = n; c; ) { ssize_t w = send( m_fd, msg.c_str() + n - c, c, 0 ); if( w < 0 ) throw SocketError( _("ControlSock::Connection( %d ): write failed"), m_fd ); if( w == 0 ) throw Error( _("ControlSock::Connection( %d ): write EOF"), m_fd ); c -= size_t(w); } } //}}} void process_request( const std::string &req, const std::string &cmd, size_t token = 0, const Json::Handle &json = Json::Handle() ) { //{{{ if( cmd == "GetIDs" ) { send_response( "[\"GetIDs\"," + stringprintf("%zu,", token) + Monitor::GetIDs() + ']' ); return; } if( cmd == "ReportStats" ) { std::string id; if( json.IsNotNULL() ) id = json->Get(2); send_response( "[\"ReportStats\"," + stringprintf("%zu,", token) + Monitor::GetStats(id) + ']' ); return; } if( cmd == "GetRawData" ) { std::string id; if( json.IsNotNULL() ) id = json->Get(2); send_response( "[\"GetRawData\"," + stringprintf("%zu,", token) + Monitor::GetRawData(id) + ']' ); return; } if( cmd == "SetLogVerbosity" ) { if( json.IsNotNULL() ) opt_verbose = json[2]->As(); Log<0>( "Log verbosity is now %d\n", opt_verbose ); send_response( stringprintf( "[\"SetLogVerbosity\",%zu,%d]", token, opt_verbose ) ); return; } send_response( "[\"UnknownRequest\"," + stringprintf("%zu,\"", token) + Json::Escape(req) + "\"]" ); } //}}} void parse_request( const std::string &req ) { //{{{ std::string error; Json json( req, error ); if( ! error.empty() ) { Log<0>( "ControlSock::Connection( %d )::parse_request: " "bad request: '%s' -> '%s'\n", m_fd, req.c_str(), error.c_str() ); send_response( "[\"BadRequest\",0," "{\"Error\":\"" + Json::Escape(error) + "\"" ",\"Request\":\"" + Json::Escape(req) + "\"}]" ); return; } Log<4>( "ControlSock::Connection( %d )::parse_request: '%s' -> '%s'\n", m_fd, req.c_str(), json.JSONStr().c_str() ); if( json.RootType() == Json::StringType ) { process_request( req, json ); return; } if( json.RootType() == Json::ArrayType ) { try { process_request( req, json[0], json[1]->As(), json ); return; } catch( const abi::__forced_unwind& ) { throw; } catch( const std::exception &e ) { error = e.what(); } catch( ... 
) { error = "Unknown exception"; } send_response( "[\"BadRequest\",0," "{\"Error\":\"" + Json::Escape(error) + "\"" ",\"Request\":\"" + Json::Escape(req) + "\"}]" ); return; } send_response( "[\"BadRequest\",0," "{\"Error\":\"Invalid request, not an array or string\"" ",\"Request\":\"" + Json::Escape(req) + "\"}]" ); } //}}} void do_connection_thread() { //{{{ using std::string; SetThreadName( "control connect" ); Log<3>( "ControlSock::Connection( %d ): begin connection_thread\n", m_fd ); char buf[1024]; size_t f = 0; for(;;) { ssize_t n = recv( m_fd, buf + f, sizeof(buf) - f, 0 ); if( n < 0 ) throw SocketError( _("ControlSock::Connection( %d ): read failed"), m_fd ); if( n == 0 ) { Log<3>( "ControlSock::Connection( %d ): read EOF\n", m_fd ); return; } Log<3>( "ControlSock::Connection( %d ): read %zu bytes at %zu\n", m_fd, n, f ); f += size_t(n); size_t b = 0; for(;;) { size_t len = strnlen( buf + b, f - b ); if( len < f - b ) { // We have a null terminated request parse_request( string( buf + b, len ) ); if( len == f - b - 1 ) { // and no bytes from the next request yet f = 0; break; } // there is (at least the start of) another request b += len + 1; } else { // We haven't read the whole request yet if( b > 0 ) { // clear out the previous request(s) to make as // much room as we can to read the rest of it f -= b; memmove( buf, buf + b, f ); } else if( f == sizeof(buf) ) { // we have a whole buffer full of data now, // but there was no request terminator seen, // so whatever is in there must be invalid. send_response( "[\"BadRequest\",0," "{\"Error\":\"Request too large\"" ",\"Request\":\"" + Json::Escape( string(buf,f) ) + "\"}]" ); f = 0; } // else // we haven't filled the whole buffer yet, // so maybe this was just a short read. // Try to read some more. break; } } } } //}}} static void *connection_thread( void *p ) { //{{{ Connection::Handle c = static_cast(p); // Drop the 'virtual handle' from the ctor, we have a real one now. c->Unref(); try { c->do_connection_thread(); } catch( const abi::__forced_unwind& ) { Log<3>( "ControlSock::Connection( %d ): connection_thread cancelled\n", c->m_fd ); throw; } BB_CATCH_STD( 0, _("uncaught ControlSock::connection_thread exception") ) c->m_server->detach_connection( c ); return NULL; } //}}} void Close() { //{{{ #if EM_PLATFORM_MSW closesocket( m_fd ); #else close( m_fd ); #endif } //}}} public: typedef RefPtr< Connection > Handle; typedef std::list< Handle > List; Connection( ControlSock *server, int fd ) : m_server( server ) , m_fd( fd ) { //{{{ Log<2>( "+ ControlSock::Connection( %d )\n", fd ); // Bump the refcount until the thread is started, otherwise we may // lose a race with this Connection being released by the caller // before the thread can take its handle from the raw pointer. // Think of it as a virtual Handle passed with pthread_create. Ref(); // We don't need to Unref() if this fails, because we'll throw // and it will never have been constructed to be destroyed ... 
int ret = pthread_create( &m_connectionthread, GetDefaultThreadAttr(), connection_thread, this ); if( ret ) { Close(); throw SystemError( ret, _("ControlSock: failed to create connection thread") ); } } //}}} ~Connection() { Log<2>( "- ControlSock::Connection( %d )\n", m_fd ); Close(); } pthread_t ThreadID() const { return m_connectionthread; } }; //}}} #if EM_PLATFORM_MSW WinsockScope m_winsock; #endif std::string m_id; int m_fd; int m_serverthread_err; pthread_t m_serverthread; pthread_mutex_t m_servermutex; Connection::List m_connections; void detach_connection( const Connection::Handle &c ) { //{{{ ScopedMutex lock( &m_servermutex ); for( Connection::List::iterator i = m_connections.begin(), e = m_connections.end(); i != e; ++i ) { if( (*i) == c ) { m_connections.erase( i ); pthread_detach( c->ThreadID() ); return; } } } //}}} bool is_fatal_error() { //{{{ #if EM_PLATFORM_MSW int err = WSAGetLastError(); return err != WSAEWOULDBLOCK || err != WSAEINTR; #else return errno != EAGAIN || errno != EINTR; #endif } //}}} BB_NORETURN void do_server_thread() { //{{{ SetThreadName( "control socket" ); Log<3>( "ControlSock( %s ): begin server_thread\n", m_id.c_str() ); for(;;) { int fd = accept( m_fd, NULL, NULL ); if( fd == -1 ) { if( is_fatal_error() ) throw SocketError( _("ControlSock( %s ): accept failed"), m_id.c_str() ); LogSocketErr<1>( _("ControlSock( %s ): accept failed"), m_id.c_str() ); continue; } try { ScopedMutex lock( &m_servermutex ); m_connections.push_back( new Connection( this, fd ) ); } BB_CATCH_ALL( 0, _("ControlSock: failed to create new Connection") ) } } //}}} static void *server_thread( void *p ) { //{{{ ControlSock *c = static_cast( p ); try { c->do_server_thread(); } catch( const abi::__forced_unwind& ) { Log<3>( "ControlSock( %s ): server_thread cancelled\n", c->m_id.c_str() ); throw; } BB_CATCH_STD( 0, _("uncaught ControlSock::server_thread exception") ) return NULL; } //}}} protected: void ListenSocket( int domain, int type, int protocol, const struct sockaddr *addr, socklen_t addrlen, bool freebind = false ) { //{{{ static const int LISTEN_BACKLOG = 5; m_fd = socket( domain, type, protocol ); if( m_fd == -1 ) throw SocketError( _("ControlSock( %s ): failed to create socket"), m_id.c_str() ); if( freebind ) EnableFreebind( m_fd, stringprintf("ControlSock( %s )", m_id.c_str()) ); if( bind( m_fd, addr, addrlen ) == -1 ) throw SocketError( _("ControlSock( %s ): failed to bind socket"), m_id.c_str() ); if( listen( m_fd, LISTEN_BACKLOG ) == -1 ) throw SocketError( _("ControlSock( %s ): failed to listen on socket"), m_id.c_str() ); } //}}} void start_server_thread() { //{{{ m_serverthread_err = pthread_create( &m_serverthread, GetDefaultThreadAttr(), server_thread, this ); if( m_serverthread_err ) { pthread_mutex_destroy( &m_servermutex ); throw SystemError( m_serverthread_err, _("ControlSock( %s ): failed to create server thread"), m_id.c_str() ); } } //}}} bool HaveSocket() const { return m_fd != -1; } void Close() { //{{{ if( m_fd == -1 ) return; #if EM_PLATFORM_MSW closesocket( m_fd ); #else close( m_fd ); #endif } //}}} public: typedef RefPtr< ControlSock > Handle; ControlSock( const std::string &id ) : m_id( id ) , m_fd( -1 ) , m_serverthread_err( ESRCH ) { pthread_mutex_init( &m_servermutex, NULL ); } ~ControlSock() { //{{{ Log<2>( "- ControlSock( '%s' )\n", m_id.c_str() ); if( m_serverthread_err == 0 ) { Log<3>( "ControlSock: terminating server\n" ); pthread_cancel( m_serverthread ); Log<3>( "ControlSock: waiting for server termination\n" ); pthread_join( 
m_serverthread, NULL ); pthread_mutex_lock( &m_servermutex ); Log<3>( "ControlSock: terminating connections\n" ); for( Connection::List::iterator i = m_connections.begin(), e = m_connections.end(); i != e; ++i ) pthread_cancel( (*i)->ThreadID() ); Log<3>( "ControlSock: waiting for connection termination\n" ); while( ! m_connections.empty() ) { pthread_t p = m_connections.back()->ThreadID(); m_connections.pop_back(); pthread_mutex_unlock( &m_servermutex ); pthread_join( p, NULL ); pthread_mutex_lock( &m_servermutex ); } pthread_mutex_unlock( &m_servermutex ); } pthread_mutex_destroy( &m_servermutex ); Close(); } //}}} }; //}}} class ControlSockUnix : public ControlSock { //{{{ private: #if EM_PLATFORM_POSIX sockaddr_any_t m_addr; std::string m_group; gid_t m_gid; int m_lockfd; bool using_group() const { return m_gid != gid_t(-1); } void create_socket_dir( const std::string &path ) { //{{{ // We already asserted path is not empty before calling this. if( path[0] != '/' ) throw Error( _("ControlSock( '%s' ): path is not absolute"), path.c_str() ); if( path[path.size() - 1] == '/' ) throw Error( _("ControlSock( '%s' ): path ends with trailing '/'"), path.c_str() ); std::string dir = path.substr( 0, path.rfind('/') ); if( dir.empty() ) throw Error( _("ControlSock( '%s' ): " "cowardly refusing to create socket in the root directory"), path.c_str() ); mode_t dirmode = S_IRUSR | S_IWUSR | S_IXUSR; if( using_group() ) dirmode |= S_IRGRP | S_IWGRP | S_IXGRP; try_again: if( mkdir( dir.c_str(), dirmode ) == -1 ) { switch( errno ) { case ENOENT: create_socket_dir( dir ); goto try_again; case EEXIST: { struct stat s; if( lstat( dir.c_str(), &s ) ) throw SystemError( _("ControlSock( %s ): failed to stat '%s'"), path.c_str(), dir.c_str() ); if( ! S_ISDIR(s.st_mode) ) throw Error( _("ControlSock( %s ): '%s' exists and is not a directory"), path.c_str(), dir.c_str() ); if( (s.st_mode & 07777) != dirmode ) throw Error( _("ControlSock( %s ): '%s' exists but is not mode %.4o"), path.c_str(), dir.c_str(), dirmode ); if( s.st_uid != geteuid() ) throw Error( _("ControlSock( %s ): '%s' exists but is not owned by us"), path.c_str(), dir.c_str() ); gid_t gid = using_group() ? m_gid : getegid(); if( s.st_gid != gid ) throw Error( _("ControlSock( %s ): " "'%s' exists but is not in the expected group"), path.c_str(), dir.c_str() ); return; } default: throw SystemError( _("ControlSock( %s ): failed to create directory '%s'"), path.c_str(), dir.c_str() ); } } // Force the desired mode, regardless of current umask. if( chmod( dir.c_str(), dirmode ) ) throw SystemError( _("ControlSock( %s ): failed to chmod %.4o '%s'"), path.c_str(), dirmode, dir.c_str() ); if( using_group() && chown( dir.c_str(), uid_t(-1), m_gid ) ) throw SystemError( _("ControlSock( %s ): failed to chown '%s' to group %s."), path.c_str(), dir.c_str(), m_group.c_str() ); } //}}} void acquire_socket_lock( const std::string &path ) { //{{{ // Take a lock in the socket dir, if we can get it any existing // socket is stale and we can safely remove it. If we can't, // then another process is running which still owns it. 
std::string lockfile = path + ".lock"; mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; m_lockfd = open( lockfile.c_str(), O_RDWR|O_CREAT, mode ); if( m_lockfd == -1 ) throw SystemError( _("ControlSock: failed to open socket lock '%s'"), lockfile.c_str() ); int ret = flock( m_lockfd, LOCK_EX | LOCK_NB ); if( ret ) { if( errno == EWOULDBLOCK ) throw Error( _("ControlSock( %s ): socket is owned by another process"), path.c_str() ); throw SystemError( _("ControlSock: failed to obtain socket lock '%s'"), lockfile.c_str() ); } // We own the lock. Write our PID to it, not for any particular // reason we rely on elsewhere, just to entertain curious people. // // Or maybe don't do that, it should be reasonably safe for us to // lock some arbitrary file that doesn't really belong to us after // all (at worst it will DoS something else trying to flock() it), // but that's a lesser evil than destroying its content, for no // real benefit to ourselves from doing so. We could do some more // paranoid sanity checking of it here too, but that's not really // worth it just to write something to it that it's likely nobody // will ever look at or care about anyway. #if 0 std::string pid = stringprintf( "%zd\n", ssize_t(getpid()) ); if( write( m_lockfd, pid.c_str(), pid.size() ) ) throw SystemError( _("ControlSock( %s ): failed to write pid to socket lock"), path.c_str() ); if( ftruncate( m_lockfd, pid.size() ) ) throw SystemError( _("ControlSock( %s ): failed to truncate socket lock"), path.c_str() ); if( fdatasync( m_lockfd ) ) throw SystemError( _("ControlSock( %s ): failed to sync socket lock"), path.c_str() ); #endif // Remove a stale socket file only if it really looks like something // that we actually might have left behind from a previous ungraceful // exit. We're privileged, we but don't have a licence to stomp upon // arbitrary files. Or at least not those that don't end in .lock ... struct stat s; if( lstat( path.c_str(), &s ) == 0 ) { if( ! S_ISSOCK(s.st_mode) ) throw Error( _("ControlSock: '%s' exists and is not a socket"), path.c_str() ); mode_t sockmode = S_IRUSR | S_IWUSR; if( using_group() ) sockmode |= S_IRGRP | S_IWGRP; if( (s.st_mode & 07777) != sockmode ) throw Error( _("ControlSock: '%s' exists but is not mode %.4o"), path.c_str(), sockmode ); if( s.st_uid != geteuid() ) throw Error( _("ControlSock: '%s' exists but is not owned by us"), path.c_str() ); gid_t gid = using_group() ? m_gid : getegid(); if( s.st_gid != gid ) throw Error( _("ControlSock: '%s' exists but is not in the expected group"), path.c_str() ); Log<1>( _("ControlSock( %s ): removing stale socket\n"), path.c_str() ); unlink( path.c_str() ); } } //}}} void listen_on_socket( const std::string &path ) { //{{{ acquire_socket_lock( path ); ListenSocket( AF_UNIX, SOCK_STREAM, 0, &m_addr.any, sizeof(m_addr.un) ); // The portable way to control access to the socket is via the permission // of its parent directory, which we already handle above, but Linux also // respects the permission on the socket itself, so if we're granting the // members of a group access to it, we need to make sure that it will be // at least that permissive too, regardless of what the umask might have // done to us. We could just 0666 it, but it's not much more work to be // explicit about what we intend. 
mode_t sockmode = S_IRUSR | S_IWUSR; if( using_group() ) sockmode |= S_IRGRP | S_IWGRP; if( chmod( path.c_str(), sockmode ) ) throw SystemError( _("ControlSock( %s ): failed to chmod %.4o socket"), path.c_str(), sockmode ); if( using_group() && chown( path.c_str(), uid_t(-1), m_gid ) ) throw SystemError( _("ControlSock( %s ): failed to chown socket to group %s."), path.c_str(), m_group.c_str() ); start_server_thread(); } //}}} void cleanup_files() { //{{{ // We don't try to remove the .lock file (for the same paranoid // reasons we don't currently write to it in acquire_socket_lock) // and we don't try to remove any directories we created because // it would be quite valid for multiple processes to share those // which makes it complicated to know with any certainty how far // back up the branch we ought to prune once the last user exits. // We don't necessarily want to remove all empty parent dirs just // because they are now empty, they might not be ours to remove, // even if they match our expected owner and mode. // // That's not a huge problem because usually they will be created // under (/var)/run, which on modern systems is typically reaped // at the next boot anyway (and sane users shouldn't be creating // dozens of them all over the place in normal use in any case). // The main way it could catch people is if they restart using a // different permitted group for socket access, but that should // be a rare thing, and isn't an entirely terrible sanity check // all of its own either. // // We do remove the socket though, mostly to avoid the dance with // checking if it really is stale and was ours at the next start. // // And we don't close m_fd here, the base destructor will do that. if( HaveSocket() ) unlink( m_addr.un.sun_path ); if( m_lockfd != -1 ) close( m_lockfd ); } //}}} #endif // EM_PLATFORM_POSIX public: typedef RefPtr< ControlSockUnix > Handle; ControlSockUnix( const std::string &path, const std::string &group = std::string() ) : ControlSock( path ) #if EM_PLATFORM_POSIX , m_group( group ) , m_gid( GetGID( m_group ) ) , m_lockfd( -1 ) #endif { //{{{ #if EM_PLATFORM_POSIX Log<2>( "+ ControlSockUnix( '%s' )\n", path.c_str() ); if( path.empty() ) throw Error( _("ControlSockUnix: no path specified") ); if( path.size() >= sizeof(m_addr.un.sun_path) ) throw Error( _("ControlSockUnix: socket path '%s' is too long. " "Maximum length is %zu bytes."), path.c_str(), sizeof(m_addr.un.sun_path) - 1 ); m_addr.un.sun_family = AF_UNIX; path.copy( m_addr.un.sun_path, sizeof(m_addr.un.sun_path) - 1 ); m_addr.un.sun_path[ path.size() ] = '\0'; create_socket_dir( path ); try { listen_on_socket( path ); } catch( ... 
) { cleanup_files(); throw; } #else (void)group; throw Error("Unix sockets are not supported on this platform"); #endif } //}}} ~ControlSockUnix() { //{{{ #if EM_PLATFORM_POSIX cleanup_files(); #endif } //}}} }; //}}} class ControlSockTCP : public ControlSock { //{{{ public: typedef RefPtr< ControlSockTCP > Handle; ControlSockTCP( const std::string &addr, bool freebind = false ) : ControlSock( addr ) { Log<2>( "+ ControlSockTCP( '%s' )\n", addr.c_str() ); SockAddr sa( addr ); sa.GetAddrInfo( SOCK_STREAM, AI_ADDRCONFIG | AI_PASSIVE ); if( sa.addr.any.sa_family != AF_INET && sa.addr.any.sa_family != AF_INET6 ) throw Error( _("ControlSockTCP( %s ): not an IPv4 or IPv6 address (family %u)"), addr.c_str(), sa.addr.any.sa_family ); ListenSocket( sa.addr.any.sa_family, sa.addr_type, sa.addr_protocol, &sa.addr.any, sa.addr_len, freebind ); start_server_thread(); } }; //}}} static inline ControlSock::Handle CreateControlSocket( const std::string &addr, const std::string &group = std::string(), bool freebind = false ) { //{{{ if( addr == "none" ) return NULL; if( addr.find("tcp:") == 0 ) return new ControlSockTCP( addr.substr(4), freebind ); return new ControlSockUnix( addr, group ); } //}}} } // BitB namespace #endif // _BB_CONTROL_SOCKET_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/exceptions.h0000644000000000000000000001106414136173163016546 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2003 - 2021, Ron #ifndef _BB_EXCEPTIONS_H #define _BB_EXCEPTIONS_H #include #include #include #include #include #include #include // If the thread cancellation exceptions are not supported, provide our own // definition of the exception type. The main trick here is that we can't // put it directly into namespace abi if that is really an alias to some // other internal namespace name. If it's an alias to some other name than // __cxxabiv1, then this will explode at build time and we'll need to add // a new configure test for other possible aliases, but right now that is // what is used on all platforms with the abi namespace so far. #if !HAVE_ABI_FORCED_UNWIND #if HAVE_ABI_ALIAS_TO_CXXABIV1 namespace __cxxabiv1 #else namespace abi #endif { struct __forced_unwind {}; } #endif namespace BitB { BB_PRINTF_FORMAT(2,0) static inline int Vasprintf( char **strp, const char *format, va_list arglist ) { //{{{ #if HAVE_VASPRINTF return vasprintf( strp, format, arglist ); #else // We can implement this in alternative ways if needed #error "vasprintf is not supported" #endif } //}}} class Exception : public std::exception { //{{{ private: std::string m_msg; public: Exception() throw() {} Exception( const std::string &msg ) throw() : m_msg( msg ) {} BB_PRINTF_FORMAT(2,3) Exception( const char *format, ... ) throw() { va_list arglist; va_start( arglist, format ); SetMessage( format, arglist ); va_end( arglist ); } // Default copy ctor and assignment oper ok. ~Exception() throw() {} void SetMessage( const std::string &msg ) throw() { m_msg = msg; } BB_PRINTF_FORMAT(2,0) void SetMessage( const char *format, va_list args ) throw() { char *msg = NULL; if( Vasprintf( &msg, format, args ) >= 0 ) { m_msg = msg; free( msg ); } else m_msg.append( " *** Error in BitB::Exception::SetMessage" ); } BB_PRINTF_FORMAT(2,3) void SetMessage( const char *format, ... 
) throw() { va_list arglist; va_start( arglist, format ); SetMessage( format, arglist ); va_end( arglist ); } void AppendMessage( const std::string &msg ) throw() { m_msg.append( msg ); } const char *what() const throw() { return m_msg.empty() ? "Unspecified BitB::Exception" : m_msg.c_str(); } }; //}}} class Error : public Exception { //{{{ public: Error() throw() {} Error( const std::string &msg ) throw() : Exception( msg ) {} BB_PRINTF_FORMAT(2,3) Error( const char *format, ... ) throw() { va_list arglist; va_start( arglist, format ); SetMessage( format, arglist ); va_end( arglist ); } }; //}}} class SystemError : public Error { //{{{ private: int m_errno; public: SystemError() throw() : m_errno( errno ) { SetMessage( "System Error: %s", strerror( m_errno ) ); } SystemError( const std::string &msg ) throw() : Error( msg + ": " + strerror(errno) ) , m_errno( errno ) {} BB_PRINTF_FORMAT(2,3) SystemError( const char *format, ... ) throw() : m_errno( errno ) { va_list arglist; va_start( arglist, format ); SetMessage( format, arglist ); va_end( arglist ); AppendMessage( std::string(": ") + strerror(m_errno) ); } BB_PRINTF_FORMAT(3,4) SystemError( int code, const char *format, ... ) throw() : m_errno( code ) { va_list arglist; va_start( arglist, format ); SetMessage( format, arglist ); va_end( arglist ); AppendMessage( std::string(": ") + strerror(m_errno) ); } int GetErrorCode() const { return m_errno; } }; //}}} } #endif // _BB_EXCEPTIONS_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/ftdi-device.h0000644000000000000000000012442014136173163016551 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2010 - 2021, Ron #ifndef _BB_FTDI_DEVICE_H #define _BB_FTDI_DEVICE_H #include #define FTDI_VENDOR_ID 0x0403 #define FTDI_PRODUCT_ID 0x6014 // Sanity check the modem and line status bits in each packet. //{{{ // There probably aren't many good reasons to disable this, but the exact set of // values which may be considered sane could need tuning for a device that isn't // running in MPSSE mode. These checks are cheap to run, so it really only makes // sense to disable them if you're in the process of adding something which would // alter what the expected valid values should be. 
//}}} #define CHECK_LINE_STATUS namespace BitB { // Interface to an FTDI device class FTDI : public RefCounted { //{{{ protected: // FTDI control request types enum RequestType { //{{{ // All these implicitly include LIBUSB_RECIPIENT_DEVICE FTDI_DEVICE_OUT_REQ = int(LIBUSB_REQUEST_TYPE_VENDOR) | LIBUSB_ENDPOINT_OUT, FTDI_DEVICE_IN_REQ = int(LIBUSB_REQUEST_TYPE_VENDOR) | LIBUSB_ENDPOINT_IN }; //}}} // FTDI control requests enum ControlRequest { //{{{ FTDI_SIO_RESET = 0x00, FTDI_SIO_MODEM_CTRL = 0x01, FTDI_SIO_SET_FLOW_CTRL = 0x02, FTDI_SIO_SET_BAUD_RATE = 0x03, FTDI_SIO_SET_DATA = 0x04, // Set data/parity/stop bits FTDI_SIO_GET_MODEM_STATUS = 0x05, FTDI_SIO_SET_EVENT_CHAR = 0x06, FTDI_SIO_SET_ERROR_CHAR = 0x07, FTDI_SIO_SET_LATENCY_TIMER = 0x09, FTDI_SIO_GET_LATENCY_TIMER = 0x0A, FTDI_SIO_SET_BITMODE = 0x0B, FTDI_SIO_READ_PINS = 0x0C, FTDI_SIO_READ_EEPROM = 0x90, FTDI_SIO_WRITE_EEPROM = 0x91, FTDI_SIO_ERASE_EEPROM = 0x92 }; //}}} // Options for the FTDI_SIO_RESET request enum ResetOperation { //{{{ FTDI_SIO_RESET_SIO = 0, FTDI_SIO_RESET_PURGE_RX = 1, FTDI_SIO_RESET_PURGE_TX = 2 }; //}}} // Options for the FTDI_SIO_SET_FLOW_CTRL request enum FlowControl { //{{{ FLOW_NONE = 0x0000, FLOW_RTS_CTS = 0x0100, FLOW_DTR_DSR = 0x0200, FLOW_XON_XOFF = 0x0400 }; //}}} // Options for the FTDI_SIO_SET_BITMODE request enum Bitmode { //{{{ BITMODE_RESET = 0x0000, // Reset (to the default) I/O mode BITMODE_ASYNC_BITBANG = 0x0100, // Asynchronous bitbang mode BITMODE_MPSSE = 0x0200, // MPSSE mode BITMODE_SYNC_BITBANG = 0x0400, // Synchronous bitbang mode BITMODE_MCU_HOST = 0x0800, // MCU host bus emulation mode BITMODE_FAST_SERIAL = 0x1000, // Fast serial for opto-isolation BITMODE_CBUS_BITBANG = 0x2000, // CBUS bitbang mode BITMODE_SYNC_FIFO = 0x4000 // Single channel synchronous 245 FIFO mode }; //}}} // Index for interface control requests enum InterfaceIndex { //{{{ FTDI_INTERFACE_A = 1, FTDI_INTERFACE_B = 2, FTDI_INTERFACE_C = 3, FTDI_INTERFACE_D = 4 }; //}}} enum ModemStatus { //{{{ FTDI_MAX64 = 0x01, // wMaxPacketSize 64 FTDI_MAX512 = 0x02, // wMaxPacketSize 512 FTDI_CTS = 0x10, // Clear to send FTDI_DSR = 0x20, // Data set ready FTDI_RI = 0x40, // Ring indicator FTDI_RLSD = 0x80 // Receive line signal detect }; //}}} enum LineStatus { //{{{ FTDI_DR = 0x01, // Data ready FTDI_OE = 0x02, // Overrun error FTDI_PE = 0x04, // Parity error FTDI_FE = 0x08, // Framing error FTDI_BI = 0x10, // Break interrupt FTDI_THRE = 0x20, // Transmit holding register empty FTDI_TEMT = 0x40, // Transmitter empty FTDI_RCVR = 0x80 // RCVR FIFO error }; //}}} enum MPSSECommand { //{{{ // Data shifting commands. 
// Bit0: -ve CLK on write // Bit1: bit mode if 1, else byte mode // Bit2: -ve CLK on read // Bit3: LSB first if 1, else MSB first // Bit4: Write TDI // Bit5: Read TDO // Bit6: Write TMS // Bit6: Always 0 // Clock len data bytes in on +ve clock edge MSB first MPSSE_DATA_BYTE_IN_POS_MSB = 0x20, // Clock len data bytes in on -ve clock edge MSB first MPSSE_DATA_BYTE_IN_NEG_MSB = 0x24, // Clock len data bytes in on +ve clock edge LSB first MPSSE_DATA_BYTE_IN_POS_LSB = 0x28, // Clock len data bytes in on -ve clock edge LSB first MPSSE_DATA_BYTE_IN_NEG_LSB = 0x2C, // Set / Read Data Bits High / Low Bytes MPSSE_SET_DATABITS_LOW = 0x80, // Set low data pins MPSSE_SET_DATABITS_HIGH = 0x82, // Set high data pins // Loopback commands MPSSE_LOOPBACK = 0x84, // Connect TDI/DO to TDO/DI MPSSE_NO_LOOPBACK = 0x85, // Disable loopback operation // Set CLK divisor MPSSE_SET_CLK_DIVISOR = 0x86, // MPSSE and MCU Host Emulation commands MPSSE_SEND_IMMEDIATE = 0x87, // Flush buffer to host // FT232H, FT2232H & FT4232H only MPSSE_NO_CLK_DIV5 = 0x8A, // Disable clock divide by 5 MPSSE_NO_3PHASE_CLK = 0x8D, // Disable 3 phase data clock MPSSE_NO_ADAPTIVE_CLK = 0x97 // Disable adaptive clocking }; //}}} static const unsigned FTDI_READ_RETRIES = 10; private: USBContext::Device::Handle m_dev; USBContext::Device::Open::Handle m_dh; unsigned m_timeout; unsigned m_latency; unsigned m_maxpacket; uint16_t /* InterfaceIndex */ m_index; uint8_t m_configuration; uint8_t m_interface; uint8_t m_altsetting; uint8_t m_epin; // Endpoint device->host address uint8_t m_epout; // Endpoint host->device address uint8_t m_linestatus; size_t m_chunksize; size_t m_chunkhead; size_t m_chunklen; uint8_t *m_chunkbuf; uint8_t m_expect_modemstatus; protected: void ftdi_reset() { //{{{ ScopedCancelState cancelstate; int ret = libusb_control_transfer( *m_dh, FTDI_DEVICE_OUT_REQ, FTDI_SIO_RESET, FTDI_SIO_RESET_SIO, m_index, NULL, 0, m_timeout ); if( ret < 0 ) ThrowUSBError( ret, _("FTDI: failed to reset device") ); } //}}} void ftdi_set_bitmode( Bitmode b, uint8_t mask = 0 ) { //{{{ // The upper 8 bits of the 16-bit value here set the bitmode, // with the lower 8 bits configuring pins as input or output // in the modes where that is relevant (0 -> input, 1 -> output). ScopedCancelState cancelstate; int ret = libusb_control_transfer( *m_dh, FTDI_DEVICE_OUT_REQ, FTDI_SIO_SET_BITMODE, uint16_t(b | mask), m_index, NULL, 0, m_timeout ); if( ret < 0 ) ThrowUSBError( ret, _("FTDI: failed to set bitmode 0x%04x"), b | mask ); } //}}} // Set (or disable) special characters in the data stream signalling that // an event fired or an error occurred. void ftdi_set_special_chars( char event = '\0', bool evt_enable = false, char error = '\0', bool err_enable = false ) { //{{{ ScopedCancelState cancelstate; int ret = libusb_control_transfer( *m_dh, FTDI_DEVICE_OUT_REQ, FTDI_SIO_SET_EVENT_CHAR, uint16_t(event | (evt_enable ? 0x100 : 0)), m_index, NULL, 0, m_timeout ); if( ret < 0 ) { if( evt_enable ) ThrowUSBError( ret, _("FTDI: failed to set event char 0x%02x"), event ); else ThrowUSBError( ret, _("FTDI: failed to disable event char") ); } ret = libusb_control_transfer( *m_dh, FTDI_DEVICE_OUT_REQ, FTDI_SIO_SET_ERROR_CHAR, uint16_t(error | (err_enable ? 
0x100 : 0)), m_index, NULL, 0, m_timeout ); if( ret < 0 ) { if( err_enable ) ThrowUSBError( ret, _("FTDI: failed to set error char 0x%02x"), error ); else ThrowUSBError( ret, _("FTDI: failed to disable error char") ); } } //}}} void ftdi_set_latency_timer( uint8_t ms ) { //{{{ // The FT232H datasheet says this can be between 0 and 255ms, // while the D2XX API documentation says it must be >= 2ms ... // They are both wrong, it must be > 0. if( ms < 1 ) ThrowError( _("Invalid latency timeout %u < 1ms"), ms ); ScopedCancelState cancelstate; int ret = libusb_control_transfer( *m_dh, FTDI_DEVICE_OUT_REQ, FTDI_SIO_SET_LATENCY_TIMER, ms, m_index, NULL, 0, m_timeout ); if( ret < 0 ) ThrowUSBError( ret, _("FTDI: failed to set latency timer to %ums"), ms ); } //}}} void ftdi_set_flow_control( FlowControl mode ) { //{{{ // This one is a bit weird, the flow control mode is passed with the index // and the value is only used when setting FLOW_XON_XOFF to set the start // and stop characters using (stop << 8 | start). ScopedCancelState cancelstate; int ret = libusb_control_transfer( *m_dh, FTDI_DEVICE_OUT_REQ, FTDI_SIO_SET_FLOW_CTRL, 0, uint16_t(mode) | m_index, NULL, 0, m_timeout ); if( ret < 0 ) ThrowUSBError( ret, _("FTDI: failed to set flow control mode 0x%04x"), mode ); } //}}} // Modem status bits in the high byte, line status bits in the low byte. uint16_t ftdi_get_modem_status() { //{{{ uint8_t ms[2]; int ret = libusb_control_transfer( *m_dh, FTDI_DEVICE_IN_REQ, FTDI_SIO_GET_MODEM_STATUS, 0, m_index, ms, 2, m_timeout ); if( ret < 0 ) ThrowUSBError( ret, _("FTDI: failed to get modem status") ); if( ret != 2 ) ThrowError( _("FTDI: get modem status returned %d bytes"), ret ); return uint16_t( (ms[0] << 8) | ms[1] ); } //}}} void ftdi_write( const uint8_t *buf, size_t len ) { //{{{ #ifdef CHECK_LINE_STATUS // This is a little bit stricter then it absolutely needs to be. //{{{ // We can actually send off a couple more requests after these bits // become unset before everything falls over, but we don't actually // (need to) do that right now, and if we get too far ahead, then the // write request will effectively block indefinitely (since we don't // have a separate thread continually reading), and since we had to // disable cancellation because libusb is unsafe for that, it means // pretty much nothing short of SIGKILL will save us at that point. // This shouldn't ever happen in normal use, and we can still have // two requests currently in flight before making more will trigger // it, but be defensive because we can and should be. //}}} if( __builtin_expect(m_linestatus != (FTDI_THRE | FTDI_TEMT), 0) ) ThrowError( _("FTDI: aborted write of len %zu with line status 0x%02x"), len, m_linestatus ); #endif // The libusb_bulk_transfer function handles both reads and writes, // so its data buffer parameter isn't const, but it doesn't modify // the buffer passed to it for a write operation. unsigned char *b = const_cast(buf); int oldstate; while( len ) { pthread_testcancel(); pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, &oldstate ); int xfer; int n = int(std::min( len, m_chunksize )); int ret = libusb_bulk_transfer( *m_dh, m_epout, b, n, &xfer, m_timeout ); pthread_setcancelstate( oldstate, NULL ); switch( ret ) { case 0: case LIBUSB_ERROR_TIMEOUT: // This shouldn't ever happen, but catch it here // instead of doing something stupid if it does ... 
if( __builtin_expect(xfer < 0 || size_t(xfer) > len,0) ) ThrowError( _("FTDI: OOPS write of %d returned %d ..."), n, xfer ); len -= unsigned(xfer); b += xfer; break; default: ThrowUSBError( ret, _("FTDI: write of %d/%zu bytes failed"), n, len ); } } } //}}} // Return the next largest multiple of m_maxpacket from n. size_t round_to_maxpacket( size_t n ) { return n + m_maxpacket - 1 - (n - 1) % m_maxpacket; } // Note that the buffer passed to this MUST be a multiple of m_maxpacket // that is equal to or larger in size than len. No more than m_chunksize // bytes will be returned from a single read regardless of the len passed. size_t ftdi_read_raw( uint8_t *buf, size_t len ) { //{{{ int oldstate; int xfer; int n = int(std::min( len, m_chunksize )); // Ensure we always request a multiple of m_maxpacket, otherwise // we can get an overflow from the last packet that is received, // since the transfer size isn't sent to the device and it might // still send a 'full' packet even if we wanted less than that. n = int(round_to_maxpacket(size_t(n))); pthread_testcancel(); pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, &oldstate ); int ret = libusb_bulk_transfer( *m_dh, m_epin, buf, n, &xfer, m_timeout ); pthread_setcancelstate( oldstate, NULL ); // LogMsg<4>("ftdi_read: len %5zu, req %5d, got %5d, ret %d [%s ]", len, n, xfer, ret, // OctetsToHex( OctetString( buf, std::min(xfer, 8) ) ).c_str() ); switch( ret ) { case 0: case LIBUSB_ERROR_TIMEOUT: // This shouldn't ever happen, but catch it here // instead of doing something stupid if it does ... if( __builtin_expect(xfer < 0 || xfer > n, 0) ) ThrowError( _("FTDI: OOPS read of %d returned %d ..."), n, xfer ); return size_t(xfer); } ThrowUSBError( ret, _("FTDI: read chunk of %d bytes failed"), n ); #if EM_PLATFORM_MSW // Really we'll never get here, but mingw-w64 4.9.2-21+15.4 // appears to be too stupid to figure that out ... return 0; #endif } //}}} size_t ftdi_read( uint8_t *buf, size_t len ) { //{{{ size_t r = 0; while( len ) { if( m_chunklen ) { // The first two bytes of every packet from the chip are used // to signal 'modem status', so we need to strip those out. 
size_t packethead = m_chunkhead % m_maxpacket; size_t packetlen = m_maxpacket - packethead; #ifdef CHECK_LINE_STATUS size_t skip = 0; switch( packethead ) { case 0: if( __builtin_expect(m_chunkbuf[m_chunkhead] != m_expect_modemstatus, 0) ) { //{{{ size_t chunklen = m_chunklen; m_chunklen = 0; ThrowError( _("FTDI: read invalid packet: " " len %5zu, chead %zu, clen %zu [%s ]"), len, m_chunkhead, chunklen, OctetsToHex( OctetString( m_chunkbuf + m_chunkhead, std::min( chunklen, size_t(8)) ) ).c_str() ); } //}}} if( m_chunklen > 1 ) { if( __builtin_expect( (m_chunkbuf[m_chunkhead + 1] & ~(FTDI_THRE | FTDI_TEMT)) != 0, 0) ) { //{{{ size_t chunklen = m_chunklen; m_chunklen = 0; ThrowError( _("FTDI: read unexpected line status: " " len %5zu, chead %zu, clen %zu [%s ]"), len, m_chunkhead, chunklen, OctetsToHex( OctetString( m_chunkbuf + m_chunkhead, std::min( chunklen, size_t(8)) ) ).c_str() ); } //}}} m_linestatus = m_chunkbuf[m_chunkhead + 1]; skip = 2; } else { skip = 1; } break; case 1: if( __builtin_expect( (m_chunkbuf[m_chunkhead] & ~(FTDI_THRE | FTDI_TEMT)) != 0, 0) ) { //{{{ size_t chunklen = m_chunklen; m_chunklen = 0; ThrowError( _("FTDI: read unexpected line status: " " len %5zu, chead %zu, clen %zu [%s ]"), len, m_chunkhead, chunklen, OctetsToHex( OctetString( m_chunkbuf + m_chunkhead, std::min( chunklen, size_t(8)) ) ).c_str() ); } //}}} m_linestatus = m_chunkbuf[m_chunkhead]; skip = 1; break; } m_chunkhead += skip; m_chunklen -= skip; packetlen -= skip; #else // ! CHECK_LINE_STATUS if( packethead < 2 ) { size_t skip = std::min( m_chunklen, 2 - packethead ); m_chunkhead += skip; m_chunklen -= skip; packetlen -= skip; } #endif // The actual data in this packet is then the the minimum of: // m_chunklen // len, // maxpacket - 2 (which can never be less than): // distance from m_chunkhead to the next packet boundary size_t n = std::min( len, std::min( packetlen, m_chunklen ) ); // LogMsg<2>("ftdi_read: len %5zu, chead %5zu, clen %5zu, phead %3zu, plen %3zu, n %zu", // len, m_chunkhead, m_chunklen, packethead, packetlen, n); memcpy( buf, m_chunkbuf + m_chunkhead, n ); m_chunkhead += n; m_chunklen -= n; len -= n; buf += n; r += n; continue; } size_t xfer = ftdi_read_raw( m_chunkbuf, len ); #ifdef CHECK_LINE_STATUS if( __builtin_expect(xfer == 2, 0) ) { if( __builtin_expect(m_chunkbuf[0] != m_expect_modemstatus || (m_chunkbuf[1] & ~(FTDI_THRE | FTDI_TEMT)) != 0, 0) ) ThrowError( _("FTDI: read invalid packet: len %5zu, got %5zu [%s ]"), len, xfer, OctetsToHex( OctetString( m_chunkbuf, std::min(xfer, size_t(8)) ) ).c_str() ); m_linestatus = m_chunkbuf[1]; return r; } if( __builtin_expect(xfer < 2, 0) ) return r; m_chunkhead = 0; m_chunklen = xfer; #else // ! 
CHECK_LINE_STATUS if( xfer < 3 ) return r; m_chunkhead = 2; m_chunklen = xfer - 2; #endif } return r; } //}}} USBContext::Device::Open::Handle GetDeviceHandle() const { return m_dh; } unsigned GetUSBTimeout() const { return m_timeout; } uint16_t GetInterfaceIndex() const { return m_index; } uint8_t GetLineStatus() const { return m_linestatus; } size_t GetReadAhead() const { return m_chunklen; } void WriteCommand( const OctetString &cmd ) { //{{{ if( __builtin_expect(opt_verbose >= 6, 0) ) LogMsg<6>( "FTDI::WriteCommand(%s )", OctetsToHex(cmd).c_str() ); ftdi_write( cmd.data(), cmd.size() ); } //}}} void WriteCommand( const uint8_t *cmd, size_t len ) { //{{{ if( __builtin_expect(opt_verbose >= 6, 0) ) return WriteCommand( OctetString( cmd, len ) ); ftdi_write( cmd, len ); } //}}} private: // Returns true if initial FTDI communication is successful bool check_sync( uint8_t cmd ) { //{{{ const uint8_t msg[] = { cmd, MPSSE_SEND_IMMEDIATE }; unsigned n = 0; uint8_t buf[512]; // must be at least wMaxPacketSize LogMsg<3>( "FTDI::check_sync( %02x )", cmd ); WriteCommand( msg, sizeof(msg) ); do { size_t ret = ftdi_read_raw( buf, sizeof(buf) ); if( ret == 4 && buf[2] == 0xFA && buf[3] == cmd ) { LogMsg<3>( "have sync for 0x%x (n = %u)", cmd, n ); return true; } if( ret > 2 ) { LogMsg<3>( "sync returned %zu bytes (n = %u)", ret, n ); n = 0; if( opt_verbose > 3 ) { size_t nbytes = 16; LogMsg<4>( ret > nbytes ? "%s ..." : "%s", OctetsToShortHex( OctetString( buf, std::min(ret, nbytes) ) ).c_str() ); } } } while( ++n < FTDI_READ_RETRIES ); return false; } //}}} protected: // Returns the total number of bytes purged size_t purge_read() { //{{{ uint8_t buf[8192]; size_t count = 0; unsigned n = 0; LogMsg<3>( "FTDI::purge_read" ); // This shouldn't ever happen, but things could go badly // in confusing and intermittent ways if it's not true. if( round_to_maxpacket( sizeof(buf) ) != sizeof(buf) ) ThrowError( "FTDI::purge_read buffer %zu is not a multiple of wMaxPacketSize %u", sizeof(buf), m_maxpacket ); do { // We can't reliably check the upper nybble of the modem status // byte because we may not always be in MPSSE mode here, and we // can't guarantee what state the UART signal pins might be in. // // And we can't reliably use the line status bits here, because // they might not be usefully set to indicate anything we care // about here. We just want to drain any junk that might still // be in the buffer on the chip which a reset didn't get rid of. size_t ret = ftdi_read_raw( buf, sizeof(buf) ); if( ret > 2 ) { count += ret; LogMsg<3>( "purged %zu / %zu (n = %u)", ret, count, n ); n = 0; if( opt_verbose > 3 ) { size_t nbytes = 16; LogMsg<4>( ret > nbytes ? "%s ..." : "%s", OctetsToShortHex( OctetString( buf, std::min(ret, nbytes) ) ).c_str() ); } } } while( ++n < FTDI_READ_RETRIES ); return count; } //}}} // This is the maximum amount of data we allow for a single transfer. //{{{ // Mostly it is chosen to limit the amount of time we might block on // waiting for a single transfer to complete. // // Returns the actual size that was set, which may have been clamped to // the maximum transfer size for the device, or rounded to the next // largest multiple of the maximum packet size. // // NOTE: // Changing the chunk size will recreate the internal chunk buffer and // discard any data that was still in it at the time. There is no // internal locking of that here, so it is the caller's responsibility // to ensure nothing is concurrently trying to access it (ie. 
in a call // to ftdi_read) and that discarding any data in it will be safe and/or // acceptable. //}}} size_t SetChunkSize( size_t bytes ) { //{{{ if( bytes > m_dev->GetMaxTransferSize() ) bytes = m_dev->GetMaxTransferSize(); // Round up the desired chunksize to the next multiple of m_maxpacket. size_t chunksize = bytes + m_maxpacket - 1 - (bytes - 1) % m_maxpacket; if( chunksize != m_chunksize ) { if( m_chunkbuf ) { delete [] m_chunkbuf; m_chunkbuf = NULL; // Just in case new throws ... } m_chunkbuf = new uint8_t[chunksize]; m_chunksize = chunksize; m_chunkhead = 0; m_chunklen = 0; } return m_chunksize; } //}}} // Set the timeout for completing short packets when there is no more data to send //{{{ // It is usually better to use an explicit flush, like MPSSE_SEND_IMMEDIATE // or the other on-chip triggers, than to try to tune throughput with this. // So normally this should be set to a large enough value to permit complete // transfer of the largest expected packet size, without truncating them due // to a timer expiry. There is some inherent latency in the chip which can // make that be a slightly longer time than the theoretical transfer time of // the data. // // NOTE: // Calling this does not in itself change the current latency setting (and // changing it on the fly between transfers is known to get the chip into a // confused state in some circumstances, so you should rethink if you really // want to do that anyway). The desired latency must be calculated and set // before calling InitMPSSE(), since that is the only place at present where // the value set here will be used. //}}} void SetLatency( unsigned ms ) { //{{{ if( ms < 1 || ms > 255 ) ThrowError( _("FTDI::SetLatency( %u ): invalid value, must be > 0 and < 255"), ms ); m_latency = ms; } //}}} // Put the chip into MPSSE mode bool InitMPSSE() { //{{{ // Initialise MPSSE mode (ref AN-135 4.2) ftdi_reset(); purge_read(); ftdi_set_special_chars(); ftdi_set_latency_timer( uint8_t(m_latency) ); ftdi_set_flow_control( FLOW_RTS_CTS ); ftdi_set_bitmode( BITMODE_RESET ); ftdi_set_bitmode( BITMODE_MPSSE ); // Wait 50ms for all of that to settle usleep(50000); m_linestatus = uint8_t(ftdi_get_modem_status() & 0xFF); try { // Sometimes the first write here just won't get a response, // for reasons that seem to be something to do with persistent // state inside the FTDI that isn't cleared by any sort of soft // reset. So retry it again once before starting another full // reset cycle, because that will often be enough to clear it. return (check_sync(0xAA) && check_sync(0xAB)) || (check_sync(0xAA) && check_sync(0xAB)); } BB_CATCH_ALL( 0, _("FTDI::InitMPSSE: sync failed") ) return false; } //}}} void ResetBitmode() { //{{{ if( ! m_dh ) return; try { // If any of these fail, the rest almost certainly will too. // The most likely reason being the device is already gone. purge_read(); ftdi_set_bitmode( BITMODE_RESET ); ftdi_reset(); } BB_CATCH_ALL( 2, _("FTDI: ResetBitmode failed") ) } //}}} public: FTDI( const USBContext::Device::Handle &dev, bool claim_now = true ) : m_dev( dev ) , m_timeout( 5000 ) // milliseconds , m_latency( 1 ) , m_index( FTDI_INTERFACE_A ) , m_configuration( 1 ) // bConfigurationValue , m_interface( 0 ) // bInterfaceNumber , m_altsetting( 0 ) // bAlternateSetting , m_linestatus( 0 ) , m_chunksize( 0 ) , m_chunkhead( 0 ) , m_chunklen( 0 ) , m_chunkbuf( NULL ) { //{{{ LogMsg<2>( "+ FTDI" ); // Sanity check some things before we access them. 
try { const USBContext::Device:: AltSetting &alt = m_dev->GetConfiguration( m_configuration ) .GetInterface( m_interface ) .GetAltSetting( m_altsetting ); if( alt.endpoint.size() != 2 ) throw Error( _("Configuration %u, Interface %u, AltSetting %u " "has %zu endpoints, expecting 2"), m_configuration, m_interface, m_altsetting, alt.endpoint.size() ); m_maxpacket = alt.endpoint[0].wMaxPacketSize; m_epin = alt.endpoint[0].bEndpointAddress; m_epout = alt.endpoint[1].bEndpointAddress; m_expect_modemstatus = uint8_t(FTDI_DSR | FTDI_CTS | (m_maxpacket == 64 ? FTDI_MAX64 : FTDI_MAX512)); } catch( const std::exception &e ) { ThrowError( _("FTDI: %s"), e.what() ); } // We could probably default this one safely to 64 if we couldn't read it, // but that's also probably a sign of something bigger gone wrong too ... if( ! m_maxpacket ) ThrowError( _("FTDI: failed to get maximum packet size") ); // We get two 'modem status' junk bytes at the start of every packet, // so make sure there will actually be room for some data too. if( m_maxpacket <= 2 ) ThrowError( _("FTDI: maximum packet size %u is smaller than the protocol overhead"), m_maxpacket ); if( USBContext::Device::Endpoint::Direction( m_epin ) != LIBUSB_ENDPOINT_IN ) ThrowError( _("FTDI: device endpoint[0] direction is not 'IN'") ); if( USBContext::Device::Endpoint::Direction( m_epout ) != LIBUSB_ENDPOINT_OUT ) ThrowError( _("FTDI: device endpoint[1] direction is not 'OUT'") ); if( claim_now ) Claim(); SetChunkSize( 65536 ); } //}}} ~FTDI() { //{{{ LogMsg<2>( "- FTDI" ); Release(); if( m_chunkbuf ) delete [] m_chunkbuf; } //}}} // This may be called whether we've claimed the device interface or not. //{{{ // If the device configuration cannot be restored, then the device // may be disconnected and reconnected, in which case this will then // throw a USBError with LIBUSB_ERROR_NOT_FOUND set, the claim on the // device interface (if it was held) will be released, and this FTDI // instance will henceforth be invalid (since the device it refers to // will no longer exist). // // If hotplug is enabled, the device should be re-enumerated again // normally (if it can be reconnected). Otherwise you may need to // rescan the bus to find it again. //}}} void SoftReset() { //{{{ if( ! m_dh ) { m_dev->OpenDevice()->SoftReset(); return; } try { m_dh->SoftReset(); } catch( ... ) { m_dh = NULL; throw; } } //}}} // Return true if we currently have a claim on the device interface. bool IsClaimed() const { return m_dh != NULL; } // Returns true if the device was (newly) claimed by calling this, //{{{ // or false if it was already claimed by us. Will throw if getting // a claim on the device fails. // // There is no reference count on claims. No matter how many times // you call this, the first call to Release() will drop the claim. //}}} virtual bool Claim() { //{{{ if( m_dh != NULL ) return false; try { m_dh = m_dev->OpenDevice(); m_dh->SetConfiguration( m_configuration ); m_dh->ClaimInterface( m_interface ); if( m_altsetting ) m_dh->SetAltInterface( m_interface, m_altsetting ); return true; } catch( ... ) { m_dh = NULL; throw; } } //}}} // Release the current claim on this device. //{{{ // It is the responsibility of the caller to ensure that no other // functions which might access the device are called while a claim // on it is not held. //}}} virtual void Release() { m_dh = NULL; } // If the endpoint_address isn't specified explicitly, try to clear // a stall from all endpoints of the currently claimed interface(s). 
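// (The default value of 0x100 is outside the valid 8-bit range of endpoint
// addresses, so presumably the open device handle treats it as a request to
// clear all endpoints rather than any specific one.)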
void ClearHalt( unsigned endpoint_address = 0x100 ) { if( m_dh != NULL ) m_dh->ClearHalt( endpoint_address ); } // Return true if this is device d. bool IsDevice( const USBContext::Device::Handle &d ) { //{{{ // A null device never matches anything. Just like SQL! if( ! m_dev || ! d ) return false; return *m_dev == *d; } //}}} size_t GetChunkSize() const { return m_chunksize; } unsigned GetLatency() const { return m_latency; } unsigned GetMaxPacketSize() const { return m_maxpacket; } const std::string &GetManufacturer() const { return m_dev->GetManufacturer(); } const std::string &GetProduct() const { return m_dev->GetProduct(); } const std::string &GetSerial() const { return m_dev->GetSerial(); } std::string ProductStr() const { return m_dev->ProductStr(); } // Convenience methods for Logging //{{{ BB_PRINTF_FORMAT(2,3) std::string ErrorStr( const char *format, ... ) const { //{{{ va_list arglist; std::string msg( m_dev->GetSerial() ); va_start( arglist, format ); msg.append( ": " ) .append( vstringprintf( format, arglist ) ); va_end( arglist ); return msg; } //}}} template< int N > BB_PRINTF_FORMAT(2,3) void LogError( const char *format, ... ) const { //{{{ va_list arglist; std::string msg( m_dev->GetSerial() ); va_start( arglist, format ); msg.append( ": " ) .append( vstringprintf( format, arglist ) ); va_end( arglist ); Log( "%s\n", msg.c_str() ); } //}}} BB_NORETURN BB_PRINTF_FORMAT(2,3) void ThrowError( const char *format, ... ) const { //{{{ va_list arglist; std::string msg( m_dev->GetSerial() ); va_start( arglist, format ); msg.append( ": " ) .append( vstringprintf( format, arglist ) ); va_end( arglist ); throw Error( msg ); } //}}} template< int N > BB_PRINTF_FORMAT(3,4) void LogUSBError( int err, const char *format, ... ) const { //{{{ va_list arglist; std::string msg( m_dev->GetSerial() ); va_start( arglist, format ); msg.append( ": " ) .append( vstringprintf( format, arglist ) ); va_end( arglist ); Log( "%s: %s\n", msg.c_str(), libusb_strerror(libusb_error(err)) ); } //}}} BB_NORETURN BB_PRINTF_FORMAT(3,4) void ThrowUSBError( int err, const char *format, ... ) const { //{{{ va_list arglist; std::string msg( m_dev->GetSerial() ); va_start( arglist, format ); msg.append( ": " ) .append( vstringprintf( format, arglist ) ); va_end( arglist ); throw USBError( err, "%s", msg.c_str() ); } //}}} BB_PRINTF_FORMAT(2,3) std::string MsgStr( const char *format, ... ) const { //{{{ va_list arglist; std::string msg( m_dev->GetSerial() ); va_start( arglist, format ); msg.append( ": " ) .append( vstringprintf( format, arglist ) ); va_end( arglist ); return msg; } //}}} template< int N > BB_PRINTF_FORMAT(2,3) void LogMsg( const char *format, ... ) const { //{{{ va_list arglist; std::string msg( m_dev->GetSerial() ); va_start( arglist, format ); msg.append( ": " ) .append( vstringprintf( format, arglist ) ); va_end( arglist ); Log( "%s\n", msg.c_str() ); } //}}} //}}} }; //}}} } // BitB namespace #endif // _BB_FTDI_DEVICE_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/health-monitor.h0000644000000000000000000002210314136173163017313 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2014 - 2015, Ron // // You must include bit-babbler/impl/health-monitor.h exactly once in some // translation unit of any program using the HealthMonitor. 
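//
// A purely illustrative sketch of that usage (the two headers are real, the
// surrounding program is hypothetical and not part of this package):
//
//   // main.cpp -- the only translation unit which includes the impl header.
//   #include <bit-babbler/health-monitor.h>
//   #include <bit-babbler/impl/health-monitor.h>
//   #include <stdint.h>
//
//   int main()
//   {
//       BitB::HealthMonitor  mon( "example-source" );
//       uint8_t              buf[4096] = { 0 };
//
//       // ... fill buf from an entropy source, then feed it to the monitor.
//       // Check() returns true while the accumulated QA results look healthy.
//       bool ok = mon.Check( buf, sizeof(buf) );
//
//       // JSON report of every registered monitor, keyed by its ID,
//       // which can be printed or exported as desired.
//       std::string stats = BitB::Monitor::GetStats();
//
//       return ok ? 0 : 1;
//   }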
#ifndef _BB_HEALTH_MONITOR_H #define _BB_HEALTH_MONITOR_H #include #include namespace BitB { class Monitor { //{{{ private: typedef std::list< Monitor* > List; static List ms_list; static pthread_mutex_t ms_mutex; std::string m_id; // You cannot copy this class Monitor( const Monitor& ); Monitor &operator=( const Monitor& ); virtual std::string ReportJSON() const = 0; virtual std::string RawDataJSON() const = 0; public: Monitor( const std::string &id ) : m_id( id ) {} virtual ~Monitor() {} void Register() { //{{{ pthread_mutex_lock( &ms_mutex ); ms_list.push_back( this ); pthread_mutex_unlock( &ms_mutex ); } //}}} void Deregister() { //{{{ pthread_mutex_lock( &ms_mutex ); ms_list.remove( this ); pthread_mutex_unlock( &ms_mutex ); } //}}} const std::string &GetID() const { return m_id; } static std::string GetIDs() { //{{{ ScopedMutex lock( &ms_mutex ); std::string ids( 1, '[' ); bool first = true; for( Monitor::List::iterator i = ms_list.begin(), e = ms_list.end(); i != e; ++i ) { if( first ) first = false; else ids += ','; ids += '"' + (*i)->m_id + '"'; } return ids + ']'; } //}}} static std::string GetStats( const std::string &id = std::string() ) { //{{{ ScopedMutex lock( &ms_mutex ); std::string report( 1, '{' ); bool first = true; for( Monitor::List::iterator i = ms_list.begin(), e = ms_list.end(); i != e; ++i ) { if( id.empty() || id == (*i)->m_id ) { if( first ) first = false; else report += ','; report += '"' + (*i)->m_id + "\":" + (*i)->ReportJSON(); } } return report + '}'; } //}}} static std::string GetRawData( const std::string &id = std::string() ) { //{{{ ScopedMutex lock( &ms_mutex ); std::string report( 1, '{' ); bool first = true; for( Monitor::List::iterator i = ms_list.begin(), e = ms_list.end(); i != e; ++i ) { if( id.empty() || id == (*i)->m_id ) { if( first ) first = false; else report += ','; report += '"' + (*i)->m_id + "\":" + (*i)->RawDataJSON(); } } return report + '}'; } //}}} }; //}}} class HealthMonitor : public Monitor { //{{{ private: mutable pthread_mutex_t m_mutex; uint8_t m_fipsbuf[ QA::FIPS::BUFFER_SIZE ]; size_t m_fipsextra; unsigned long long m_bytes_analysed; unsigned long long m_bytes_passed; QA::FIPS m_fips; QA::Ent8 m_ent; QA::Ent16 m_ent16; bool m_fips_ok; bool m_ent_ok; bool m_ent16_ok; // You must hold m_mutex when calling this std::string QAResultsAsJSON() const { //{{{ return stringprintf( "\"QA\":{" "\"BytesAnalysed\":%llu," "\"BytesPassed\":%llu" "}", m_bytes_analysed, m_bytes_passed ); } //}}} public: HealthMonitor( const std::string &id, bool assume_ent8_ok = true ) : Monitor( id ) , m_fipsextra( 0 ) , m_bytes_analysed( 0 ) , m_bytes_passed( 0 ) , m_fips_ok( false ) , m_ent_ok( assume_ent8_ok ) , m_ent16_ok( true ) { //{{{ // We assume the results are not ok until we have some positive // measure to the contrary from at least the FIPS tests. If we // wanted to be a bit more paranoid we could also wait until we // get a non-failing result back from the Ent tests too, however // there is a tradeoff there in that we won't get back the first // result from the Ent8 test until it has tested 500,000 8-bit // samples while the Ent16 test needs 100 million 16-bit samples // before returning its first result. // // A source providing 1Mbps will take about 4 seconds to get the // first Ent8 result and more than 26 minutes for an Ent16 result. // Which is probably a bit on the long side to be waiting before // a probably ok entropy source is brought online. 
If there is a // source feeding 5Mbps or above though it tips the balance toward // it making good sense to default the Ent8 test to not being ok // until we've seen a passable result from it too. // // So we let the caller decide whether they want to wait for the // Ent8 results or not, we assume FIPS is bad until it actually // passes (by the more restrictive recovery margin), and we let // the Ent16 test just be a sanity check for long term abnormal // behaviour that may be evident in the larger sample space. Log<2>( "+ HealthMonitor( %s )\n", GetID().c_str() ); pthread_mutex_init( &m_mutex, NULL ); Register(); } //}}} ~HealthMonitor() { Log<2>( "- HealthMonitor( %s )\n", GetID().c_str() ); Deregister(); pthread_mutex_destroy( &m_mutex ); } bool Check( const uint8_t *buf, size_t len ) { //{{{ using QA::FIPS; ScopedMutex lock( &m_mutex ); size_t b = len; m_ent.Analyse( buf, len ); m_ent16.Analyse( buf, len ); if( m_ent.HaveResults() ) m_ent_ok = m_ent.IsOk( m_ent_ok ); if( m_ent16.HaveResults() ) m_ent16_ok = m_ent16.IsOk( m_ent16_ok ); if( m_fipsextra ) { size_t n = std::min( FIPS::BUFFER_SIZE - m_fipsextra, len ); memcpy( m_fipsbuf + m_fipsextra, buf, n ); len -= n; buf += n; m_fipsextra += n; if( m_fipsextra == FIPS::BUFFER_SIZE ) { m_fips.Analyse( m_fipsbuf ); m_fips_ok = m_fips.IsOk( m_fips_ok ); m_fipsextra = 0; } } while( len >= FIPS::BUFFER_SIZE ) { m_fips.Analyse( buf ); m_fips_ok = m_fips.IsOk( m_fips_ok ); len -= FIPS::BUFFER_SIZE; buf += FIPS::BUFFER_SIZE; } if( len ) { memcpy( m_fipsbuf, buf, len ); m_fipsextra = len; } m_bytes_analysed += b; if( m_ent_ok && m_ent16_ok && m_fips_ok ) { m_bytes_passed += b; return true; } return false; } //}}} virtual std::string ReportJSON() const { //{{{ ScopedMutex lock( &m_mutex ); std::string report = '{' + QAResultsAsJSON(); report += ',' + m_fips.ResultsAsJSON(); if( m_ent.HaveResults() ) report += ',' + m_ent.ResultsAsJSON(); if( m_ent16.HaveResults() ) report += ',' + m_ent16.ResultsAsJSON(); return report + '}'; } //}}} virtual std::string RawDataJSON() const { //{{{ ScopedMutex lock( &m_mutex ); std::string s( 1, '{' ); if( m_ent.HaveResults() ) s += m_ent.AsJSON(); if( m_ent16.HaveResults() ) s += ',' + m_ent16.AsJSON(); s += '}'; return s; } //}}} }; //}}} } // BitB namespace #endif // _BB_HEALTH_MONITOR_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/impl/0002755000000000000000000000000014136173163015155 5ustar bit-babbler-0.9/include/bit-babbler/impl/health-monitor.h0000644000000000000000000000114114136173163020253 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2014 - 2015, Ron // // This file provides the implementation detail for bit-babbler/health-monitor.h // which must be defined only once in an application. #ifdef _BBIMPL_HEALTH_MONITOR_H #error bit-babbler/impl/health-monitor.h must be included only once. #endif #define _BBIMPL_HEALTH_MONITOR_H #include namespace BitB { Monitor::List Monitor::ms_list; pthread_mutex_t Monitor::ms_mutex = PTHREAD_MUTEX_INITIALIZER; } // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/impl/log.h0000644000000000000000000003234614136173163016115 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2003 - 2021, Ron // // This file provides the implementation detail for bit-babbler/log.h // which must be defined only once in an application. #ifdef _BBIMPL_LOG_H #error bit-babbler/impl/log.h must be included only once. 
#endif #define _BBIMPL_LOG_H #include namespace BitB { bool opt_syslog = 0; bool opt_timestamp = 1; int opt_verbose = 0; std::string OctetsToHex( const OctetString &octets, size_t wrap, bool short_form ) { //{{{ std::string s; char b[6]; for( size_t i = 0, e = octets.size(); i < e; ++i ) { int n = snprintf(b, sizeof(b), short_form ? " %02x" : " 0x%02x", octets[i]); #ifdef PARANOID_STRING_CHECKING // This should never happen here if( __builtin_expect( n < 0, 0 ) ) return std::string( "OctetsToHex failed" ); // And this should be even less likely if( __builtin_expect( unsigned(n) > sizeof(b), 0 ) ) s.append( b, sizeof(b) ).append("(truncated)"); #endif s.append( b, unsigned(n) ); if( wrap && i % wrap == wrap - 1 ) s.append( 1, '\n' ); } return s; } //}}} unsigned long StrToScaledUL( const char *s, unsigned scale ) { //{{{ char *e; unsigned long r = strtoul( s, &e, 10 ); if( e == s ) throw Error( _("StrToScaledUL( '%s' ): not a number"), s ); if( *e == '\0' ) return r; // Always let ibibytes be explicitly forced whatever the default scale if( *(e+1) == 'i' ) scale = 1024; switch( *e ) { case '\0': return r; case 'k': case 'K': // Accept this abomination too, because people. return r * scale; case 'M': return r * scale * scale; case 'G': return r * scale * scale * scale; case 'T': return r * scale * scale * scale * scale; } throw Error( _("StrToScaledUL( '%s' ): '%c' is not a recognised scale"), s, *e ); } //}}} double StrToScaledD( const char *s ) { //{{{ errno = 0; char *e; double r = strtod( s, &e ); double scale = 1000.0; if( errno ) throw SystemError( errno, _("StrToScaledD( '%s' ) failed"), s ); if( e == s ) throw Error( _("StrToScaledD( '%s' ): not a number"), s ); if( *e == '\0' ) return r; if( *(e+1) == 'i' ) scale = 1024.0; switch( *e ) { case 'p': return r / (scale * scale * scale * scale); case 'n': return r / (scale * scale * scale); case 'u': return r / (scale * scale); case 'm': return r / scale; case '\0': return r; case 'k': case 'K': // Accept this abomination too, because people. return r * scale; case 'M': return r * (scale * scale); case 'G': return r * (scale * scale * scale); case 'T': return r * (scale * scale * scale * scale); } throw Error( _("StrToScaledD( '%s' ): '%c' is not a recognised scale"), s, *e ); } //}}} // This is a workaround for (at least) GCC 8.3.0, which annoyingly suggests // that we flag this as cold, iff we annotate the declaration with BB_COLD. // It is a known bug, that empirically is not seen in GCC 10.2.1 and should // not be seen before GCC 8, since -Wsuggest-attribute=cold (and malloc) // were not implemented before then. #if EM_COMPILER_GCC(8,0) && ! EM_COMPILER_GCC(10,2) EM_PUSH_DIAGNOSTIC_IGNORE("-Wsuggest-attribute=cold") #endif void GetFutureTimespec( timespec &ts, unsigned ms ) { //{{{ // This one doesn't really belong in with the log.h code, but that does // already depend on the only other time functions we're using, and it // probably isn't worth splitting that out on its own at this point. unsigned long long nsec; #if HAVE_CLOCK_GETTIME if( clock_gettime(CLOCK_REALTIME, &ts) ) throw SystemError("clock_gettime failed"); nsec = ms * 1000000ULL + static_cast(ts.tv_nsec); #else timeval tv; if( gettimeofday(&tv, NULL) ) throw SystemError("gettimeofday failed"); ts.tv_sec = tv.tv_sec; nsec = ms * 1000000ULL + static_cast(tv.tv_usec) * 1000ULL; #endif if( nsec < 1000000000 ) { ts.tv_nsec = long(nsec); } else { ts.tv_sec += nsec / 1000000000; ts.tv_nsec = nsec % 1000000000; } } //}}} #if EM_COMPILER_GCC(8,0) && ! 
EM_COMPILER_GCC(10,2) EM_POP_DIAGNOSTIC #endif std::string stringprintf( const char *format, ... ) { //{{{ // We don't call vstringprintf here because it would mean an // extra copy operation to save a couple of lines of simple // code (and this comment reminding me why I didn't factor it // out the last time I looked at it). va_list arglist; char *s = NULL; std::string str; va_start( arglist, format ); int len = Vasprintf( &s, format, arglist ); if( len >= 0 ) { if( len > 0 ) str = s; free( s ); } else { va_end( arglist ); // Note it is not safe to free s here, it is allowed to be // undefined on failure so we assume it was not allocated. throw SystemError( _("stringprintf failed to expand format") ); } va_end( arglist ); return str; } //}}} std::string vstringprintf( const char *format, va_list arglist ) { //{{{ char *s = NULL; std::string str; int len = Vasprintf( &s, format, arglist ); if( len >= 0 ) { if( len > 0 ) str = s; free( s ); } else { // Note it is not safe to free s here, it is allowed to be // undefined on failure so we assume it was not allocated. throw SystemError( _("vstringprintf failed to expand format") ); } return str; } //}}} std::string timeprintf( const char *format, const timeval &tv ) { //{{{ using std::string; const size_t MAX_LEN = 1024; string timestr; struct tm tm; string::size_type n = 0; // time_t may not be the same size as tv_sec, and indeed on 64-bit Windows // (and possibly OpenBSD among others) time_t is long long while tv_sec is // only long. time_t sec = tv.tv_sec; // First format a string according to the standard specifiers. #if HAVE_LOCALTIME_R localtime_r( &sec, &tm ); #else memcpy( &tm, localtime( &sec ), sizeof( tm ) ); #endif #if EM_PLATFORM_MSW // We lose the ability to have compile time format checking of strftime // by doing this but for that function it's of fairly limited value and // as of gcc-7 we need to disable it here anyway because it doesn't see // that this function itself is format-checked and so still whines that // the format is not a literal even though it just checked that it is! string fmt = format; // Windows doesn't support the %T format option, so expand it manually here. while( ( n = fmt.find( "%T", n, 2 ) ) != string::npos ) fmt.replace( n, 2, "%H:%M:%S" ); n = 0; // Windows doesn't support the %F format option, so expand it manually here. while( ( n = fmt.find( "%F", n, 2 ) ) != string::npos ) fmt.replace( n, 2, "%Y-%m-%d" ); n = 0; #endif // gcc-7 complains about the format parameter passed to strftime, even // though it already checked it when this function itself was called. EM_PUSH_DIAGNOSTIC_IGNORE("-Wformat-nonliteral") #if EM_PLATFORM_LINUX // Querying the string length with a NULL for the output is a GNU extension. // The glibc docs specify that behaviour but SUSv4 does not define it. // Windows doesn't support it, nor does FreeBSD 11 or MacOS 10.12 (Sierra). // It's also tricky to actually test for that, since on systems where it // doesn't work, there is no clear indication except perhaps a segfault. // So we'll be conservative and only use it where we know it really works. size_t size = strftime( NULL, MAX_LEN, format, &tm ); #else size_t size = MAX_LEN; #endif if( size > 0 ) { char buf[ size + 1 ]; #if EM_PLATFORM_MSW if( strftime( buf, size + 1, fmt.c_str(), &tm ) > 0 ) timestr = buf; #else if( strftime( buf, size + 1, format, &tm ) > 0 ) timestr = buf; #endif } EM_POP_DIAGNOSTIC // Then substitute microseconds if required. 
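// For example, a literal "%u" remaining in the expanded string (written as
// "%%u" in the original format so that strftime() passes it through) becomes
// the full six digit zero-padded microsecond count, while a single digit
// width such as "%3u" is replaced with just the first three of those digits.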
while( ( n = timestr.find( '%', n ) ) != string::npos ) { string::size_type end = timestr.find( 'u', n ); if( end != string::npos ) { string::size_type len = end - n; char usec[ 7 ]; snprintf( usec, sizeof( usec ), "%06ld", long(tv.tv_usec) ); if( len == 1 ) { timestr.replace( n, len + 1, usec, 6 ); } else if( len == 2 ) { unsigned int width; if( sscanf( string( timestr, n + 1, len - 1 ).c_str(), "%u", &width ) == 1 ) timestr.replace( n, len + 1, usec, std::min(width,6u) ); } n = end; } } return timestr; } //}}} std::string DemangleSymbol( const char *sym ) { //{{{ int status; char *name = abi::__cxa_demangle( sym, NULL, NULL, &status ); switch( status ) { case 0: { std::string s( name ); free( name ); return s; } case -2: { std::string s( 1, '\'' ); s.append( sym ); s.append( "' (not demangled)" ); return s; } case -1: throw Error( "Memory allocation failure in DemangleSymbol" ); case -3: throw Error( "Invalid argument in DemangleSymbol" ); default: throw Error( "Unknown return status (%d) in DemangleSymbol", status ); } } //}}} #ifdef _REENTRANT #if ! THREAD_STACK_SIZE BB_CONST #endif const pthread_attr_t *GetDefaultThreadAttr() { //{{{ #if THREAD_STACK_SIZE static pthread_attr_t a; static bool need_init = true; if( __builtin_expect( need_init, false ) ) { size_t n = THREAD_STACK_SIZE * 1024; size_t d; need_init = false; pthread_attr_init( &a ); int ret = pthread_attr_getstacksize( &a, &d ); if( ret ) throw SystemError( ret, "Failed to get default stack size\n" ); Log<5>( "Initialising pthread_attr for stack size %zu (default is %zu)\n", n, d ); ret = pthread_attr_setstacksize( &a, n ); if( ret ) throw SystemError( ret, "Failed to initialise pthread_attr for stack size %zu\n", n ); } return &a; #else // Use the default attributes if we aren't overriding anything return NULL; #endif } //}}} #endif } #if EM_PLATFORM_MSW // Mingw W64 6.3.0 suggests these functions should be marked 'const', while // the '10-win32' release (rightly) complains if they are ... So once again // muffle the buggy suggestion just for the compiler versions making it, and // we can get rid of this workaround once none of need be supported anymore. // 6.3.0 released with Stretch, so we've got a few more years still before // it falls out of ELTS support. #if EM_COMPILER_GCC(6,0) && ! EM_COMPILER_GCC(10,0) EM_PUSH_DIAGNOSTIC_IGNORE("-Wsuggest-attribute=const") #endif void openlog(const char *ident, int option, int facility) { (void)ident; (void)option; (void)facility; } void syslog(int priority, const char *format, ...) { (void)priority; (void)format; } void vsyslog(int priority, const char *format, va_list ap) { (void)priority; (void)format; (void)ap; } #if EM_COMPILER_GCC(6,0) && ! EM_COMPILER_GCC(10,0) EM_POP_DIAGNOSTIC #endif #endif // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/iniparser.h0000644000000000000000000017555314136173163016377 0ustar //////////////////////////////////////////////////////////////////// // //! @file iniparser.h //! @ingroup INIParsing //! @brief Parser for INI formatted data. // // Copyright 2013 - 2021, Ron // This file is distributed as part of the bit-babbler package. // //////////////////////////////////////////////////////////////////// #ifndef _BB_INIPARSER_H #define _BB_INIPARSER_H #include #include // We don't hash the Section and Option maps by default right now. 
// Most of the structures we are expecting to handle at this stage will all be // relatively small, so the speed benefit is likely to be minimal (or may even // be non-existant or negative), and the deterministic ordering of a sorted map // is more user friendly for data that users might see. // If we ever need to enable this, we should benchmark it, and then possibly // consider templating the IniData class to allow both. For now, keep it simple. //#define BB_HASH_INIDATA #ifdef BB_HASH_INIDATA #include #else #include #endif // Enable these if you need low level debug output from IniData. //#define BB_DEBUG_INIPARSER //#define BB_DEBUG_INIVALIDATOR // Allow this to be overridden so the unit tests can send it to stdout. #ifndef BB_DEBUG_INI_LOGSTREAM //! Where to output debug logs. #define BB_DEBUG_INI_LOGSTREAM stderr #endif //! Debug logging for the parser implementation. #ifdef BB_DEBUG_INIPARSER #define debug_inip(...) fprintf(BB_DEBUG_INI_LOGSTREAM, ##__VA_ARGS__); #else #define debug_inip(...) #endif //! Debug logging for the validator implementation. #ifdef BB_DEBUG_INIVALIDATOR #define debug_iniv(...) fprintf(BB_DEBUG_INI_LOGSTREAM, ##__VA_ARGS__); #else #define debug_iniv(...) #endif namespace BitB { //! @defgroup INIParsing INI data parsing //! @brief Support for INI formatted data. //! @ingroup DataStorage //!@{ //! Parser and container class for INI format structured data. //{{{ //! The data encoded in this format consists of @b Sections, @b Options and //! @b Values. Since there is no standard definition for this format, the //! rules for this particular parser are defined as follows: //! //! A section definition begins on a line where its name is enclosed in //! square brackets. The section name itself may contain any characters //! except square brackets, and they have no intrinsic special meaning //! except as a string identifier. Any characters following the closing //! square bracket on the same line will simply be ignored. While this may //! be used to add comments there, doing so should probably be discouraged //! as a stylistic idiom, and it may be desirable in the future to be more //! strict and reject any trailing 'junk' following a section header. //! //! All following option/value pairs belong to that section until the next //! section header occurs. Option names may include any characters except //! whitespace. Leading and trailing whitespace around option names and //! values is ignored. Internal whitespace in option values is preserved. //! Options must be defined on a single line, and everything (except leading //! and trailing whitespace) following the option name up to the end of the //! line is part of the value. Quote characters (of any sort) have no //! special meaning to this parser and will be included as a literal part of //! the value (individual applications however may apply any special meaning //! to any character in the option name or value that they please). To this //! parser, both option names and their values are simply literal strings. //! //! Comments must appear on their own line, with the first (non-whitespace) //! character of the line being '#'. //! //! For example: //! @code //! [my-section] //! # Option 1 does something important, this is a comment about it. //! option1 value1 //! # You may not need option 2. //! # option2 value2 3 and4 //! //! [another-section] //! # This option has the same name as one above, //! # but is in a different section. //! option2 some other value //! 
watchout 1 #note: here '#' is part of the value, not a comment. //! @endcode //! //! Section names must be unique. Multiple appearances of the same section //! name is an error. When multiple sections of the same type are required, //! a sensible convention to use is the form: //! //! @code //! [name:label] //! @endcode //! Where @c name is the type of the section and @c label is a unique identifier //! of the particular instance being defined. But this is merely a convention, //! the ':' character has no special significance beyond what an application //! attributes to particular section names. //! //! Option names must be unique within a section. If an option name is used //! more than once in the same section, then a parsing error will be reported. //! Option values are optional. An option with no value will be assigned the //! empty string as its value. //! //! As an exception to the normal uniqueness rules above, when additional INI //! format data is imported with the @c UpdateWith() method, then any sections //! or options which overlap with already existing data will be merged with it //! without error, allowing such things as reading multiple files with option //! settings in the later files overridding any value set previously. This //! should be used with appropriate caution, since it could also allow typos //! to 'silently' stomp over some other configuration option accidentally. //! //! By default, the above rules are the only constraints applied when parsing //! the input data, no other limit is placed on what section names will be //! acceptable, what options they may contain, or what values those may be //! assigned. Everything the parser reads which is in the correct format will //! be imported. For most application use, some stronger checking will be //! desirable, to quickly spot typos in expected section or option names, or //! names which simply aren't valid for use in the current version. And to //! sanity check that values are of some expected type or form or range. //! The @c Validator class provides a simple way to construct introspective //! checks of the data content to ensure that it is valid in more than just //! its form as generic INI data. //}}} class IniData : public RefCounted { //{{{ public: //! @name Container types //@{ //{{{ //! Container type for %IniData Options and their values. #ifdef EG_HASH_INIDATA typedef was_tr1::unordered_map< std::string, std::string > Options; #else typedef std::map< std::string, std::string > Options; #endif //@} //}}} //! @name Handle type //@{ //{{{ //! Handle type for a @c %IniData parser instance. typedef RefPtr< IniData > Handle; //@} //}}} //! Container for the options in a single INI section. class Section : public RefCounted { //{{{ private: std::string m_name; //!< The name of this section. Options m_options; //!< The Options it contains. public: //! @name Handle type //@{ //{{{ //! Handle type for a @c %Section instance. typedef RefPtr< Section > Handle; //@} //}}} //! @name Container types //@{ //{{{ //! Container type for IniData Sections. #ifdef EG_HASH_INIDATA typedef was_tr1::unordered_map< std::string, Handle > Map; #else typedef std::map< std::string, Handle > Map; #endif //@} //}}} //! @name Constructor //@{ //{{{ //! Construct a new section with the given @a name. Section( const std::string &name ) : m_name( name ) {} //@} //}}} //! @name Section construction methods //@{ //{{{ //! Add a new Option to this %Section. //{{{ //! This method will not alter any existing data, it will just define //! 
an additional new Option in this %Section. //! //! @param option The identifier name of the new option. //! @param value The value to set for this option. If not provided, //! the option's value will be an empty string. //! //! @exception Error will be thrown if the @a option is already defined //! in this %Section. //}}} void AddOption( const std::string &name, const std::string &value = std::string() ) { //{{{ if( HasOption( name ) ) throw Error( "Duplicated option '%s' in Section '%s'", name.c_str(), m_name.c_str() ); m_options[name] = value; } //}}} //! Change the value of an existing Option. //{{{ //! The option being updated must already exist in this %Section. //! //! @param option The identifier name of the option. //! @param value The new value to set for this option. If not provided, //! the option's value will be an empty string. //! //! @exception Error will be thrown if the @a option is not already //! defined in this %Section. //}}} void UpdateOption( const std::string &name, const std::string &value = std::string() ) { //{{{ if( ! HasOption( name ) ) throw Error( "Option '%s' is not defined in Section '%s'", name.c_str(), m_name.c_str() ); m_options[name] = value; } //}}} //! %Set or change the value of an Option. //{{{ //! The option will be added if it does not already exist, else its value //! will be updated if it does. //! //! @param option The identifier name of the option. //! @param value The value to set for this option. If not provided, //! the option's value will be an empty string. //}}} void AddOrUpdateOption( const std::string &name, const std::string &value = std::string() ) { m_options[name] = value; } //@} //}}} //! @name Removal methods //@{ //{{{ //! Remove an Option from this @c %Section. //{{{ //! @param name The identifier name of the option to remove. // //! @return @c true if an option with that name existed in this section //! and was removed. //}}} bool RemoveOption( const std::string &name ) { return m_options.erase( name ) > 0; } //@} //}}} //! @name Accessor methods //@{ //{{{ //! Return the name of this %Section. const std::string &GetName() const { return m_name; } //! Return @c true if option @a name is defined in this %Section. BB_PURE bool HasOption( const std::string &name ) const { return m_options.find( name ) != m_options.end(); } //! Return the value of option @a name in this %Section. //{{{ //! @exception Error will be thrown if @a name is not defined in //! this %Section. //}}} std::string GetOption( const std::string &name ) const { //{{{ Options::const_iterator i = m_options.find( name ); if( i == m_options.end() ) throw Error( "Section '%s' has no option '%s' defined", m_name.c_str(), name.c_str() ); return i->second; } //}}} //! Query the value of option @a name in this %Section. //{{{ //! This method will not throw if the option is not defined in this //! %Section, it will return the @a default_value provided for it //! instead. //! //! @param name The option that a value is wanted for. //! @param default_value A value to return if @a name is not //! defined in this %Section. //}}} std::string GetOption( const std::string &name, const std::string &default_value ) const { //{{{ Options::const_iterator i = m_options.find( name ); if( i == m_options.end() ) return default_value; return i->second; } //}}} //! Return a map of all options in this %Section. const Options &GetOptions() const { return m_options; } //@} //}}} //! @name Output methods //@{ //{{{ //! Return an INI encoded string of this %Section and its Options. //{{{ //! 
@note The string returned may not be identical to the one that //! was originally parsed. The ordering of Options may not //! be preserved and insignificant whitespace may be different. //}}} std::string INIStr() const { //{{{ std::string s( '[' + m_name + "]\n" ); for( Options::const_iterator i = m_options.begin(), e = m_options.end(); i != e; ++i ) { if( i->second.empty() ) s.append( i->first + '\n' ); else s.append( i->first + ' ' + i->second + '\n' ); } return s; } //}}} //@} //}}} }; //}}} //! @name Container types //@{ //{{{ //! Container type for %IniData Sections. typedef Section::Map Sections; //@} //}}} //! Generic support for validating INI Sections and Options. //{{{ //! This class makes it easy to define the set of allowable %Section names, //! or patterns of %Section names, and the names of Options and acceptable //! values for them in each of those sections. //}}} class Validator : public RefCounted { //{{{ public: //! @name Test method signatures //@{ //{{{ //! Signature type for functions used to match Section names. //{{{ //! Functions with this signature are passed as the @a method parameter //! of @c Validator::Section() when creating a %Validator instance. //! //! @param expect The string we expect to match against. //! @param seen The string we are checking for a match. //! //! @return @c true if @a seen is a match to what we @a expect, //! according to whatever criteria the implementation //! intends to apply. //! //! Implementations of this are not usually expected to ever throw. //}}} typedef bool(*section_name_test)( const std::string &expect, const std::string &seen ); //! Signature type for functions used to check option values. //{{{ //! Functions with this signature are passed as the @a method parameter //! of @c OptionList::AddTest() when creating a %Validator instance. //! //! @param option The name of the option being checked. //! @param value The value it was assigned. //! //! @exception Error is expected to be thrown if the value is //! not acceptable, along with a message suitable //! for presentation to the end-user explaining why. //}}} typedef void(*option_value_test)( const std::string &option, const std::string &value ); //@} //}}} //! @name Section name test methods //! Standard comparison functions which may be passed as the //! @ref section_name_test parameter to Validator::Section(). //! Alternative test methods may also be provided by other //! application code. //@{ //{{{ //! A @ref section_name_test for section names strictly equal to @a expect. //{{{ //! @param expect The string this test expects to match to. //! @param seen The string we are checking for a match. //! //! @return @c true if @a seen equals @a expect. //}}} static bool SectionNameEquals( const std::string &expect, const std::string &seen ) { return expect == seen; } //! A @ref section_name_test for section names prefixed by @a expect. //{{{ //! @param expect The string prefix this test expects to match to. //! @param seen The string we are checking for a match. //! //! @return @c true if @a seen starts with @a expect. //}}} static bool SectionNamePrefix( const std::string &expect, const std::string &seen ) { return StartsWith( expect, seen ); } //@} //}}} //! @name Option value test methods //! Standard comparison functions which may be passed as the //! @ref option_value_test parameter to OptionList::AddTest(). //! Alternative test methods may also be provided by other //! application code. //@{ //{{{ //! An @ref option_value_test for options which must have some value. 
//{{{ //! @exception Error will be thrown if @a option does not have a //! non-empty @a value assigned to it. //}}} static void OptionWithValue( const std::string &option, const std::string &value ) { //{{{ if( value.empty() ) throw Error( _("Option '%s' expects a value."), option.c_str() ); } //}}} //! An @ref option_value_test for options which must not have a value. //{{{ //! @exception Error will be thrown if @a option has any non-empty //! @a value assigned to it. //}}} static void OptionWithoutValue( const std::string &option, const std::string &value ) { //{{{ if( ! value.empty() ) throw Error( _("Option '%s' should not have a value assigned."), option.c_str() ); } //}}} //! An @ref option_value_test for options which may optionally have a value. //{{{ //! Using this test permits validating that the @a option name is acceptable //! without placing any (initial) constraint upon its @a value during validation. //! It will accept any value, including an empty one. //}}} static void OptionWithAnyValue( const std::string &option, const std::string &value ) { (void)option; (void)value; } //@} //}}} //! Container for a list of Option validation checks. class OptionList : public RefCounted { //{{{ private: //! @name Container types //@{ //{{{ //! Container type for Option names and the functions to test their values. #ifdef EG_HASH_INIDATA typedef was_tr1::unordered_map< std::string, option_value_test > Tests; #else typedef std::map< std::string, option_value_test > Tests; #endif //@} //}}} //! The map of acceptable Option names to functions for testing their value. Tests m_tests; public: //! @name Handle type //@{ //{{{ //! Handle type for an @c %OptionList instance. typedef RefPtr< OptionList > Handle; //@} //}}} //! @name Constructors //@{ //{{{ //! Create a new, empty, %OptionList. OptionList() {} //! Create a new %OptionList initialised with a single option and its test. //{{{ //! @param option_name The option to recognise and apply this test to. //! @param method The @ref option_value_test used to determine if //! the value assigned to this option is valid. //! //! This is equivalent to: //! @code //! OptionList().AddTest( option_name, method ); //! @endcode //}}} OptionList( const std::string &option_name, option_value_test method ) { m_tests[option_name] = method; } //@} //}}} //! @name Initialiser methods //@{ //{{{ //! Add (or alter) a test for some valid option name. //{{{ //! If there was a previously existing test for @a option_name, it will //! silently be replaced. //! //! @param option_name The option to recognise and apply this test to. //! @param method The @ref option_value_test used to determine if //! the value assigned to this option is valid. //! //! @return A pointer to this @c %OptionList, so that multiple calls to //! this method may be chained together when defining all the //! valid options allowed in some section. //}}} OptionList *AddTest( const std::string &option_name, option_value_test method ) { m_tests[option_name] = method; return this; } //@} //}}} //! @name Validation test methods //@{ //{{{ //! Test that an option name and its value are valid. //{{{ //! @param name The name of the option to validate. //! @param value The value it was assigned. //! //! @exception Error will be thrown if the option name is unknown //! or if the value is not acceptable according to the //! criteria of the test which was specified for it. 
//}}} void CheckOption( const std::string &name, const std::string &value ) const { //{{{ debug_iniv( " Check option '%s', value '%s'\n", name.c_str(), value.c_str() ); Tests::const_iterator i = m_tests.find( name ); if( i == m_tests.end() ) throw Error( "Unknown option '%s'", name.c_str() ); debug_iniv( " Validating '%s'\n", name.c_str() ); i->second( name, value ); } //}}} //@} //}}} }; //}}} private: //! Container for the validation checks to be performed for some section. class SectionCheck { //{{{ public: //! @name Container types //@{ //{{{ //! Container for the list of Section checks used by a @c Validator. typedef std::list< SectionCheck > List; //@} //}}} private: //! The name of the Section(s) this should check. std::string m_name; //! How to compare m_name for a match to the actual section name. section_name_test m_test; //! The list of Option validation tests for matching Sections. OptionList::Handle m_options; public: //! @name Constructors //@{ //{{{ //! Create a validation check for some INI Section. //{{{ //! @param name The string used to check if this set of tests //! are to be applied to a particular %Section. //! @param nametest The method used to compare @a name to the //! %Section identifier to see if these checks //! are to be applied. It may test for a perfect //! match, or a prefix match, or use any other //! criteria appropriate to the application. //! @param options A list of the validation tests to be applied //! to each of the Options in a matching Section. //}}} SectionCheck( const std::string &name, section_name_test test, const OptionList::Handle &options ) : m_name( name ) , m_test( test ) , m_options( options ) {} //@} //}}} //! @name Accessors //@{ //{{{ //! Test a Section against these validation criteria. //{{{ //! @param s The Section to validate. //! //! @return @c false if the Section identifier does not match this //! set of tests according to the @c section_name_test being //! used and the reference string it is being compared to. //! @c true if the Section identifier was a match and all //! Options it contained validated successfully. //! //! @exception Error will be thrown if this set of tests were applied //! to the Section, but any Option defined in it failed //! validation. //}}} bool CheckSection( const Section::Handle &s ) const { //{{{ // Is this the section we are looking for? if( ! m_test( m_name, s->GetName() ) ) return false; debug_iniv( "Checking [%s] with '%s' validator\n", s->GetName().c_str(), m_name.c_str() ); // If so, are its options all valid? const Options &opts = s->GetOptions(); try { for( Options::const_iterator i = opts.begin(), e = opts.end(); i != e; ++i ) m_options->CheckOption( i->first, i->second ); } catch( const std::exception &e ) { throw Error( _("Section [%s]: %s"), s->GetName().c_str(), e.what() ); } debug_iniv( " Check [%s] passed\n", s->GetName().c_str() ); return true; } //}}} //! Return the string we are testing section names against. const std::string &TestStr() const { return m_name; } //@} //}}} }; //}}} //! The list of Section validation checks to apply. SectionCheck::List m_sections; public: //! @name Handle type //@{ //{{{ //! Handle type for a @c %Validator instance. typedef RefPtr< Validator > Handle; //@} //}}} //! @name Constructors //@{ //{{{ //! Create a new %Validator instance. Validator() {} //@} //}}} //! @name Initialiser methods //@{ //{{{ //! Add tests to validate Section names and the Options they may contain. //{{{ //! @param name The string used to check if this set of tests //! 
are to be applied to a particular %Section. //! @param method The @ref section_name_test used to compare @a name //! to the %Section identifier to see if these checks //! are to be applied. It may test for a perfect //! match, or a prefix match, or use any other //! criteria appropriate to the application. //! @param options A list of the validation tests to be applied //! to each of the Options in a matching Section. //}}} void Section( const std::string &name, section_name_test method, const OptionList::Handle &options ) { m_sections.push_back( SectionCheck( name, method, options ) ); } //@} //}}} //! @name Validation test methods //@{ //{{{ //! Test INI @a data against the constraints of this %Validator //{{{ //! @exception Error will be thrown if validation fails. //}}} void Validate( const IniData::Handle &data ) const { //{{{ const Sections &s = data->GetSections(); debug_iniv( "Validating %zu INI sections\n", s.size() ); for( Sections::const_iterator i = s.begin(), e = s.end(); i != e; ++i ) { debug_iniv( "Validate [%s]\n", i->first.c_str() ); for( SectionCheck::List::const_iterator ci = m_sections.begin(), ce = m_sections.end(); ci != ce; ++ci ) { if( ci->CheckSection( i->second ) ) goto check_next_section; debug_iniv( " not matched to '%s' validator\n", ci->TestStr().c_str() ); } throw Error( "Unknown section [%s]", i->first.c_str() ); check_next_section: ; } debug_iniv( "Validated %zu INI sections.\n", s.size() ); } //}}} //! Test INI @a data against the constraints of this %Validator //{{{ //! This method will never throw, if there is an error, then a //! description of it will be returned in the @a error parameter. //! //! If no error occurs, then the content of @a error will be untouched. //! Sane users will generally want to ensure it is empty before this //! method is called, but that is not a hard requirement. If an error //! occurs the content of @a error will be replaced, not appended to. //! //! @return @c true if there was no error validating the @a data. //}}} bool Validate( const IniData::Handle &data, std::string &error ) const { //{{{ try { Validate( data ); return true; } catch( const abi::__forced_unwind& ) { throw; } catch( const std::exception &e ) { error = e.what(); } catch( ... ) { error = "Unknown exception"; } return false; } //}}} //@} //}}} }; //}}} private: //! All sections mapped by name. Sections m_sections; //! @name INI structure parsing //@{ //{{{ //! Return the position of the first non-whitespace byte in @a data after @a pos. //{{{ //! If there is no character that is not insignificant whitespace after //! @a pos, then @c std::string::npos will be returned. //}}} static size_t skip_whitespace( const std::string &data, size_t pos ) { //{{{ return data.find_first_not_of(" \t\n\r", pos); } //}}} //! Return the next line of @a data beginning at @a pos. static std::string get_next_line( const std::string &data, size_t &pos ) { //{{{ // Trim off any leading whitespace. size_t b = skip_whitespace( data, pos ); if( b != std::string::npos ) { // Find the next line break. size_t e = data.find_first_of( "\n\r", b ); // Advance the read pointer to the non-whitespace // character which will start the following line. pos = skip_whitespace( data, e ); //debug_inip( "b = %zu, e = %zu, pos = %zu\n", b, e, pos ); return data.substr( b, e - b ); } pos = b; return std::string(); } //}}} //! Parse a line of text containing a Section header. 
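//{{{
//! For example, the line "[rng:first]  trailing text" produces a section
//! named "rng:first"; anything after the closing bracket is ignored, as
//! described in the class documentation above.
//}}}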
Section::Handle parse_section( const std::string &s, bool allow_duplicates ) { //{{{ using std::string; debug_inip( "begin section: '%s'\n", s.c_str() ); size_t n = s.find_first_of(']'); if( n == string::npos || s.size() < 3 ) throw Error( "Invalid section '%s'", s.c_str() ); string name = s.substr( 1, n - 1 ); debug_inip( "section name: '%s'\n", name.c_str() ); if( allow_duplicates ) return AddOrGetSection( name ); return AddSection( name ); } //}}} //! Parse a line of text containing an option for @a section. void parse_option( const Section::Handle §ion, const std::string &s, bool allow_duplicates ) { //{{{ using std::string; // Caller already stripped leading whitespace from the line. size_t n1 = s.find_first_of(" \t"); // Find end of option size_t n2 = skip_whitespace( s, n1 ); // Find start of value size_t n3 = s.find_last_not_of(" \t\n\r" ); // Strip trailing whitespace debug_inip( "scan option: '%s' -- n1 %zu, n2 %zu, n3 %zu\n", s.c_str(), n1, n2, n3); string opt = s.substr( 0, n1 ); string val = (n2 != string::npos) ? s.substr( n2, n3 - (n2 - 1) ) : string(); debug_inip( "have option: '%s', value: '%s'\n", opt.c_str(), val.c_str() ); if( allow_duplicates ) section->AddOrUpdateOption( opt, val ); else section->AddOption( opt, val ); } //}}} //! Parse a block of INI formatted data. void parse( const std::string &data, bool allow_duplicates = false ) { //{{{ using std::string; Section::Handle current_section; size_t pos = 0; while( pos != string::npos ) { //debug_inip( "pos = %zu\n", pos ); string s = get_next_line( data, pos ); if( s.empty() ) continue; switch( s[0] ) { case '#': debug_inip( "skipping comment: '%s'\n", s.c_str() ); break; case '[': current_section = parse_section( s, allow_duplicates ); break; default: parse_option( current_section, s, allow_duplicates ); } } } //}}} //@} //}}} public: //! @name Constructors //@{ //{{{ //! Default constructor for a new empty parser. IniData() {} //! Construct a new instance, parsing a block of INI @a data from a string. //{{{ //! @exception Various exceptions may be thrown if the @ data string //! is not a valid INI structure. //}}} IniData( const std::string &data ) { parse( data ); } //! Construct a new instance, parsing a block of INI @a data from a string. //{{{ //! This constructor will never throw. Instead, if there is an error, //! then a description of it will be returned in the @a error parameter. //! If an error is returned, then the content of this parser instance //! is undefined and no attempt should be made to access it. //! //! If no error occurs, then the content of @a error will be untouched. //! Sane users will generally want to ensure it is empty before this //! method is called, but that is not a hard requirement. If an error //! occurs the content of @a error will be replaced, not appended to. //}}} IniData( const std::string &data, std::string &error ) { Decode( data, error ); } //@} //}}} //! @name Generic container operations //@{ //{{{ //! Erases all data currently held in this parser instance. void clear() { m_sections.clear(); } //! Return @c true if this parser contains no data. //{{{ //! It will return @c false if it contains any sections, even if they //! have no options defined in them. //}}} bool empty() const { return m_sections.empty(); } //@} //}}} //! @name Initialiser methods //@{ //{{{ //! Decode a new block of INI @a data from a string. //{{{ //! The existing content of this parser, if any, will be replaced by //! the new data. //! //! 
@exception Various exceptions may be thrown if the @a data string //! is not a valid INI structure. //! //! If this method throws an exception, then the parser should be //! considered to be in an indefinite state (at present, the options //! which were successfully parsed prior to the error will be included //! in it, while any following options will not - but applications //! should not rely on that behaviour in any way as it is strictly an //! implementation detail which could change without warning in some //! future revision). //}}} void Decode( const std::string &data ) { //{{{ clear(); parse( data ); } //}}} //! Decode a new block of INI @a data from a string. //{{{ //! The existing content of this parser, if any, will be replaced by //! the new data. //! //! This method will never throw. Instead, if there is an error, then //! a description of it will be returned in the @a error parameter. //! If an error is returned, then the content of this parser instance //! is undefined and no attempt should be made to access it. //! //! If no error occurs, then the content of @a error will be untouched. //! Sane users will generally want to ensure it is empty before this //! method is called, but that is not a hard requirement. If an error //! occurs the content of @a error will be replaced, not appended to. //! //! @return @c true if there was no error parsing the @a data. //}}} bool Decode( const std::string &data, std::string &error ) { //{{{ try { Decode( data ); return true; } catch( const abi::__forced_unwind& ) { throw; } catch( const std::exception &e ) { error = e.what(); } catch( ... ) { error = "Unknown exception"; } return false; } //}}} //! Decode a(nother) block of INI @a data from a string. //{{{ //! The existing content of this parser, if any, will @b not be replaced //! by the new data, it will simply be added to it in the same way as if //! it has been appended to any existing data when that was parsed. This //! means that any duplicate %Section names declared in this new @a data //! will be considered an error. //! //! @exception Various exceptions may be thrown if the @a data string //! is not a valid INI structure. //! //! If this method throws an exception, then the parser should be //! considered to be in an indefinite state (at present, the options //! which were successfully parsed prior to the error will be included //! in it, while any following options will not - but applications //! should not rely on that behaviour in any way as it is strictly an //! implementation detail which could change without warning in some //! future revision). //}}} void DecodeMore( const std::string &data ) { parse( data ); } //! Decode a(nother) block of INI @a data from a string. //{{{ //! The existing content of this parser, if any, will @b not be replaced //! by the new data, it will simply be added to it in the same way as if //! it has been appended to any existing data when that was parsed. This //! means that any duplicate %Section names declared in this new @a data //! will be considered an error. //! //! This method will never throw. Instead, if there is an error, then //! a description of it will be returned in the @a error parameter. //! If an error is returned, then the content of this parser instance //! is undefined and no attempt should be made to access it. //! //! If no error occurs, then the content of @a error will be untouched. //! Sane users will generally want to ensure it is empty before this //! method is called, but that is not a hard requirement. 
If an error //! occurs the content of @a error will be replaced, not appended to. //! //! @return @c true if there was no error parsing the @a data. //}}} bool DecodeMore( const std::string &data, std::string &error ) { //{{{ try { parse( data ); return true; } catch( const abi::__forced_unwind& ) { throw; } catch( const std::exception &e ) { error = e.what(); } catch( ... ) { error = "Unknown exception"; } return false; } //}}} //! Update the existing options with a block of INI @a data from a string. //{{{ //! The existing content of this parser, if any, will be appended to or //! updated by the new data. It is not an error for it to contain //! %Sections and %Options which have been already defined, the new values //! will simply replace any old ones which already existed, and add any //! which previously did not. //! //! @exception Various exceptions may be thrown if the @a data string //! is not a valid INI structure. //! //! If this method throws an exception, then the parser should be //! considered to be in an indefinite state (at present, the options //! which were successfully parsed prior to the error will be included //! in it, while any following options will not - but applications //! should not rely on that behaviour in any way as it is strictly an //! implementation detail which could change without warning in some //! future revision). //}}} void UpdateWith( const std::string &data ) { parse( data, true ); } //! Update the existing options with a block of INI @a data from a string. //{{{ //! The existing content of this parser, if any, will be appended to or //! updated by the new data. It is not an error for it to contain //! %Sections and %Options which have been already defined, the new values //! will simply replace any old ones which already existed, and add any //! which previously did not. //! //! This method will never throw. Instead, if there is an error, then //! a description of it will be returned in the @a error parameter. //! If an error is returned, then the content of this parser instance //! is undefined and no attempt should be made to access it. //! //! If no error occurs, then the content of @a error will be untouched. //! Sane users will generally want to ensure it is empty before this //! method is called, but that is not a hard requirement. If an error //! occurs the content of @a error will be replaced, not appended to. //! //! @return @c true if there was no error parsing the @a data. //}}} bool UpdateWith( const std::string &data, std::string &error ) { //{{{ try { parse( data, true ); return true; } catch( const abi::__forced_unwind& ) { throw; } catch( const std::exception &e ) { error = e.what(); } catch( ... ) { error = "Unknown exception"; } return false; } //}}} //! Add a new @c %Section. //{{{ //! This method will not alter any existing data, it will just create //! an additional new @c Section. //! //! @param name The identifier for the new section. //! //! @return A handle to the newly created section. //! //! @exception Error will be thrown if @a name is already definied as //! an existing section. //}}} Section::Handle AddSection( const std::string &name ) { //{{{ if( m_sections.find( name ) != m_sections.end() ) throw Error( "Duplicated section [%s]", name.c_str() ); m_sections[ name ] = new Section( name ); return m_sections[name]; } //}}} //! Add a new Option to a @c %Section. //{{{ //! This method will not alter any existing data, it will just define //! an additional new Option in some @c Section. //! //! 
@param section A handle to the section that the option is to be //! added to. //! @param option The identifier name of the new option. //! @param value The value to set for this option. If not provided, //! the option's value will be an empty string. //! //! @note It is the caller's responsibility to ensure that @a section //! is a valid handle to an existing %Section. //! //! @exception Error will be thrown if the @a option is already defined //! in this @a section. //}}} void AddOption( const Section::Handle §ion, const std::string &option, const std::string &value = std::string() ) { section->AddOption( option, value ); } //! Add a new Option to a named @c %Section. //{{{ //! This method will not alter any existing data, it will just define //! an additional new Option in some @c Section. //! //! @param section The name of the section that the option is to be //! added to. //! @param option The identifier name of the new option. //! @param value The value to set for this option. If not provided, //! the option's value will be an empty string. //! //! @exception Error will be thrown if the @a option is already defined //! in this @a section, or if the section does not already //! exist. //}}} void AddOption( const std::string §ion, const std::string &option, const std::string &value = std::string() ) { GetSection( section )->AddOption( option, value ); } //! Change the value of an existing Option. //{{{ //! The option being updated must already exist in the given @a section. //! //! @param section A handle to the section where the option is found. //! @param option The identifier name of the option. //! @param value The new value to set for this option. If not provided, //! the option's value will be an empty string. //! //! @note It is the caller's responsibility to ensure that @a section //! is a valid handle to an existing %Section. //! //! @exception Error will be thrown if the @a option is not already //! defined in this @a section. //}}} void UpdateOption( const Section::Handle §ion, const std::string &option, const std::string &value = std::string() ) { section->UpdateOption( option, value ); } //! Change the value of an existing Option. //{{{ //! The option being updated, and the @c Section it is contained in, //! must already exist. //! //! @param section The name of the section that the option is to be //! added to. //! @param option The identifier name of the option. //! @param value The new value to set for this option. If not provided, //! the option's value will be an empty string. //! //! @exception Error will be thrown if the @a option is not already //! defined in this @a section, or if the section does not //! already exist. //}}} void UpdateOption( const std::string §ion, const std::string &option, const std::string &value = std::string() ) { GetSection( section )->UpdateOption( option, value ); } //! %Set or change the value of an Option. //{{{ //! The option will be added if it does not already exist, else its value //! will be updated if it does. //! //! @param section A handle to the section where the option is found. //! @param option The identifier name of the option. //! @param value The value to set for this option. If not provided, //! the option's value will be an empty string. //! //! @note It is the caller's responsibility to ensure that @a section //! is a valid handle to an existing %Section. 
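    //!
    //! For example (an illustrative sketch only; the @c parser instance name
    //! is assumed, and @c Section::Handle is written unqualified for brevity):
    //! @code
    //! Section::Handle s = parser.AddOrGetSection( "service" );
    //!
    //! parser.AddOrUpdateOption( s, "port", "8080" );  // added, or replaced if already set
    //! parser.AddOrUpdateOption( s, "logfile" );       // value becomes an empty string
    //! @endcode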
//}}} void AddOrUpdateOption( const Section::Handle §ion, const std::string &option, const std::string &value = std::string() ) { section->AddOrUpdateOption( option, value ); } //! %Set or change the value of an Option. //{{{ //! The option will be added if it does not already exist, else its value //! will be updated if it does. If the section does not already exist, //! then it will be created too. //! //! @param section The name of the section that the option is to be //! added to. //! @param option The identifier name of the option. //! @param value The value to set for this option. If not provided, //! the option's value will be an empty string. //}}} void AddOrUpdateOption( const std::string §ion, const std::string &option, const std::string &value = std::string() ) { AddOrGetSection( section )->AddOrUpdateOption( option, value ); } //@} //}}} //! @name Removal methods //@{ //{{{ //! Remove a @c %Section. //{{{ //! @param name The identifier of the section to remove. //! //! @return @c true if a section with that name existed and was removed. //}}} bool RemoveSection( const std::string &name ) { return m_sections.erase( name ) > 0; } //! Remove an Option from a @c %Section. //{{{ //! @param section A handle to the section that the option is to be //! removed from. //! @param option The identifier name of the option to remove. //! //! @note It is the caller's responsibility to ensure that @a section //! is a valid handle to an existing %Section. //! //! @return @c true if an option with that name existed in that section //! and was removed. //}}} bool RemoveOption( const Section::Handle §ion, const std::string &option ) { return section->RemoveOption( option ); } //! Remove an Option from a @c %Section. //{{{ //! @param section The name of the section that the option is to be //! removed from. //! @param option The identifier name of the option to remove. //! //! @return @c true if an option with that name existed in a section //! with that name and was removed. //}}} bool RemoveOption( const std::string §ion, const std::string &option ) { //{{{ Sections::const_iterator i = m_sections.find( section ); if( i == m_sections.end() ) return false; return i->second->RemoveOption( option ); } //}}} //@} //}}} //! @name Accessor methods //@{ //{{{ //! Return a map of all sections. const Sections &GetSections() const { return m_sections; } //! Return a map of all sections with names matching the given @a prefix. //{{{ //! The keys of the returned map are the trailing portions of the matching //! section names not including the prefix. To get the full section name //! you can call the @c Section::GetName() method (if the @a prefix is no //! longer available to prepend to the key). //}}} Sections GetSections( const std::string &prefix ) const { //{{{ Sections s; size_t n = prefix.size(); for( Sections::const_iterator i = m_sections.begin(), e = m_sections.end(); i != e; ++i ) { if( StartsWith( prefix, i->first ) ) s[i->first.substr(n)] = i->second; } return s; } //}}} //! Return @c true if %Section @a name is defined. BB_PURE bool HasSection( const std::string &name ) const { return m_sections.find( name ) != m_sections.end(); } //! Return a handle to %Section @a name. //{{{ //! @param name The identifier of the requested Section. //! //! @exception Error will be thrown if the %Section is not defined. 
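    //!
    //! For example (an illustrative sketch only; the @c parser instance name
    //! and the section and option names are assumed):
    //! @code
    //! if( parser.HasSection( "service" ) )
    //! {
    //!     std::string port = parser.GetSection( "service" )->GetOption( "port", "80" );
    //! }
    //! @endcode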
//}}} Section::Handle GetSection( const std::string &name ) const { //{{{ Sections::const_iterator i = m_sections.find( name ); if( i == m_sections.end() ) throw Error( "Section [%s] is not defined", name.c_str() ); return i->second; } //}}} //! Return a handle to %Section @a name. //{{{ //! If the @c Section was not already defined, it will be created and a //! handle to the new empty @c %Section structure will be returned. //! //! @param name The identifier of the requested Section. //}}} Section::Handle AddOrGetSection( const std::string &name ) { //{{{ if( ! HasSection( name ) ) return AddSection( name ); return m_sections[name]; } //}}} //! Return a map of all options defined in @a section. //{{{ //! @note It is the caller's responsibility to ensure that @a section //! is a valid handle to an existing %Section. //}}} const Options &GetOptions( const Section::Handle §ion ) const { return section->GetOptions(); } //! Return a map of all options defined in the named @a section. //{{{ //! @exception Error will be thrown if @a section is not defined. //}}} const Options &GetOptions( const std::string §ion ) const { return GetSection( section )->GetOptions(); } //! Return @c true if @a option is defined in @a section. //{{{ //! @note It is the caller's responsibility to ensure that @a section //! is a valid handle to an existing %Section. //}}} bool HasOption( const Section::Handle §ion, const std::string &option ) const { return section->HasOption( option ); } //! Return @c true if @a option is defined in the named @a section. //{{{ //! This will return @c false if either the section itself is not defined, //! or if the option is not defined within it. //}}} bool HasOption( const std::string §ion, const std::string &option ) const { //{{{ Sections::const_iterator i = m_sections.find( section ); if( i == m_sections.end() ) return false; return i->second->HasOption( option ); } //}}} //! Return the value of @a option defined in @a section. //{{{ //! @note It is the caller's responsibility to ensure that @a section //! is a valid handle to an existing %Section. //! //! @exception Error will be thrown if @a option is not defined in //! the given @a section. //}}} std::string GetOption( const Section::Handle §ion, const std::string &option ) const { return section->GetOption( option ); } //! Return the value of @a option defined in the named @a section. //{{{ //! @exception Error will be thrown if @a option is not defined in //! the given @a section, or if the section itself is not //! defined. //}}} std::string GetOption( const std::string §ion, const std::string &option ) const { return GetSection( section )->GetOption( option ); } //! Query the value of @a option in a @a section. //{{{ //! This method will not throw if the option is not defined in the //! given section, it will return the @a default_value provided for //! it instead. //! //! @param section A handle to the selected Section. //! @param option The option that a value is wanted for. //! @param default_value A value to return if @a option is not //! defined in the @a section. //! //! @note It is the caller's responsibility to ensure that @a section //! is a valid handle to an existing %Section. //}}} std::string GetOption( const Section::Handle §ion, const std::string &option, const std::string &default_value ) const { return section->GetOption( option, default_value ); } //! Query the value of @a option in the named @a section. //{{{ //! This method will not throw if the option is not defined in the //! 
given section, or if the section itself is not defined. It will //! return the @a default_value provided for it instead. //! //! @param section The name of the selected Section. //! @param option The option that a value is wanted for. //! @param default_value A value to return if @a option is not //! defined in the @a section. //}}} std::string GetOption( const std::string §ion, const std::string &option, const std::string &default_value ) const { //{{{ Sections::const_iterator i = m_sections.find( section ); if( i == m_sections.end() ) return default_value; return i->second->GetOption( option, default_value ); } //}}} //@} //}}} //! @name Output methods //@{ //{{{ //! Return an INI encoded string of the structures in this parser //{{{ //! @note The string returned may not be identical to the one that //! was originally parsed. The ordering of Sections and //! Options may not be preserved and insignificant whitespace //! may be different. //}}} std::string INIStr() const { //{{{ std::string s; for( Sections::const_iterator i = m_sections.begin(), e = m_sections.end(); i != e; ++i ) s.append( i->second->INIStr() + '\n' ); return s; } //}}} //@} //}}} }; //}}} //!@} } // BitB namespace // Don't let these leak outside this file, nobody else should use them, and // we avoid a possible conflict with some exernal dependency included later. #undef debug_inip #undef debug_iniv #endif // _BB_INIPARSER_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/json.h0000644000000000000000000031506414136173163015345 0ustar //////////////////////////////////////////////////////////////////// // //! @file json.h //! @ingroup JsonParsing //! @brief Parser for JSON formatted data. // // Copyright 2013 - 2021, Ron // This file is distributed as part of the bit-babbler package. // //////////////////////////////////////////////////////////////////// #ifndef _BB_JSON_H #define _BB_JSON_H #include #include #include #include #include #include #include #if HAVE_XLOCALE_H #include #endif // We don't hash the Object member map by default right now. // Most of the structures we are expecting to handle at this stage will all be // relatively small, so the speed benefit is likely to be minimal (or may even // be non-existant or negative), and the deterministic ordering of a sorted map // is more user friendly for data that users might see. // If we ever need to enable this, we should benchmark it, and then possibly // consider templating the Json class to allow both. For now, keep it simple. //#define BB_HASH_JSON_OBJECT_MEMBERS #ifdef BB_HASH_JSON_OBJECT_MEMBERS #include #else #include #endif namespace BitB { //! @defgroup JsonParsing JSON data parsing //! @brief Support for JSON formatted data. //! @ingroup DataStorage //!@{ //! Parser and container class for JSON encoded data //{{{ //! This class may be used to decompose and/or generate UTF-8 encoded JSON //! data strings in the format described by //! RFC 7159. //! //! If the macro @c STRICT_RFC4627_COMPATIBILITY is defined before including //! the @c json.h header, then the older semantics of requiring that the root //! of the JSON text be either an object or an array will be enforced, as //! described in the obsoleted //! RFC 4627 specification. //! Applications built using the RFC 7159 mode can still check JSON text for //! RFC 4627 compliance at runtime by confirming the @c Json::RootType() is //! a @c Json::ObjectType or @c Json::ArrayType. //! //! In the present implementation only Objects with unique member names are //! supported. 
This is an //! RFC 2119 @b SHOULD requirement, and currently there are no users //! of this with extenuating circumstances that warrant the extra complexity //! to end use which would come with being an exception to that. A lookup //! by key name will always return either zero or one value, never a list //! that may in turn need to be iterated over to obtain the desired value. //}}} class Json : public RefCounted { //{{{ public: //! @name Data types //@{ //{{{ //! JSON data type identifiers enum DataType { NullType, //!< The JSON @c null primitive type BoolType, //!< The JSON boolean primitive type NumberType, //!< The JSON number primitive type StringType, //!< The JSON string primitive type ArrayType, //!< The JSON array structured type ObjectType //!< The JSON object structured type }; //! Container type for a list of Object member names typedef std::list< std::string > MemberList; //@} //}}} //! @name Data type strings //@{ //{{{ //! Return a descriptive string for a JSON @c DataType static const char *DataTypeStr( DataType t ) { switch( t ) { case NullType: return "Null"; case BoolType: return "Bool"; case NumberType: return "Number"; case StringType: return "String"; case ArrayType: return "Array"; case ObjectType: return "Object"; } return "Unknown JSON type"; } //@} //}}} //! @name String escaping //@{ //{{{ //! Return a JSON escaped copy of a string static std::string Escape( const std::string &str ) { //{{{ std::string s; for( std::string::const_iterator i = str.begin(), e = str.end(); i != e; ++i ) { switch( *i ) { case '"': s += "\\\""; break; case '\\': s += "\\\\"; break; // case '/': s += "\\/"; break; // escaping this one is optional case '\b': s += "\\b"; break; case '\f': s += "\\f"; break; case '\n': s += "\\n"; break; case '\r': s += "\\r"; break; case '\t': s += "\\t"; break; default: if( uint8_t(*i) < 0x20 ) s += stringprintf("\\u00%02x", *i); else s.push_back( *i ); break; } } return s; } //}}} static uint16_t HexStrTo16( const char *str ) { //{{{ uint16_t v; switch( sscanf( str, "%hx", &v ) ) { case 1: return v; case 0: throw Error( "HexStrTo16( '%s' ): invalid conversion", str ); case EOF: throw Error( "HexStrTo16( '%s' ): invalid input", str ); default: throw SystemError( "HexStrTo16( '%s' ): sscanf failure", str ); } } //}}} //! Return a copy of a string with any JSON escaping undone static std::string Unescape( const std::string &str ) { //{{{ std::string s; size_t p = 0; uint32_t lead_surrogate = 0; size_t trail_surrogate_p = 0; for(;;) { size_t n = str.find('\\', p); if( n == std::string::npos ) { s.append( str.substr(p) ); break; } s.append( str.substr(p, n - p) ); switch( str[++n] ) { case '"': case '/': p = n; break; case '\\': s += '\\'; p = n + 1; break; case 'b': s += '\b'; p = n + 1; break; case 'f': s += '\f'; p = n + 1; break; case 'n': s += '\n'; p = n + 1; break; case 'r': s += '\r'; p = n + 1; break; case 't': s += '\t'; p = n + 1; break; case 'u': { // This could throw, but if it does that's ok, since it // would mean the string we are unescaping is invalid. std::string h = str.substr( n + 1, 4 ); uint16_t v = HexStrTo16( h.c_str() ); if( IsUTF16LeadingSurrogate(v) ) { lead_surrogate = uint32_t(v) << 10; trail_surrogate_p = n + 6; } else if( IsUTF16TrailingSurrogate(v) ) { const uint32_t surrogate_offset = (0xD800u << 10) + 0xDC00 - 0x10000; if( n == trail_surrogate_p ) AppendAsUTF8( s, v + lead_surrogate - surrogate_offset ); } else AppendAsUTF8( s, v ); p = n + 5; break; } } } return s; } //}}} //@} //}}} //! 
Container for an individual data element in the JSON structure class Data : public RefCounted { //{{{ public: //! %Handle type for @c Json data structures. //{{{ //! We derive this type from @c RefPtr rather than using a simple //! typedef so that we can provide additional operators to directly //! access @c Object members and @c Array elements without needing //! clumsy syntax to first dereference the @c %Handle. //}}} template< typename T > class HandleType : public RefPtr< T > { //{{{ public: //! @name Constructors //@{ //{{{ //! Contruct a handle to a raw @c Data @a ptr. //{{{ //! This constructor takes ownership of @a ptr and will destroy //! it when the last handle to it is destroyed. //}}} HandleType( T *ptr = NULL ) : RefPtr( ptr ) {} //! Construct a handle from a reference to an object that we should not destroy. //{{{ //! This constructor is complementary to the one which takes a pointer //! to an object in that it will create a handle to the object, but it //! will @e not take ownership of that object if it is not already //! owned by another RefPtr to it. //! It may be used to create a handle to a @c RefCounted object that //! was created on the stack or in some other context which already //! manages its lifetime. //! //! If the reference count of @a obj is zero when it is passed to //! this constructor, it will be incremented before normal reference //! counting begins and the object's reference count will never drop //! to zero without manual intervention. It is the responsibility of //! the implementer to ensure that the object is correctly destroyed //! @e and that it outlives this @c RefPtr handle and any of its copies. //! //! If the reference count of @a obj is non zero then this constructor //! will effectively behave the same as if RefPtr( T* ) was //! called. //! //! It is possible to safety check @a obj prior to destruction by //! testing its refcount which should be 0 if a handle to it was //! never created, 1 if handles were created that have now all been //! destroyed, and > 1 if handles still exist that have not been //! destroyed yet. //}}} HandleType( T &obj ) : RefPtr( obj ) {} //! Copy constructor. //{{{ //! @note This may look irrelevant, but the templated version //! will NOT stop the compiler from creating a default //! copy constructor. //}}} HandleType( const Handle &ptr ) : RefPtr( ptr ) {} //@} //}}} //! @name Primitive data type accessors //@{ //{{{ //! Implicit conversion of JSON numeric primitive @c Data //{{{ //! @exception Error will be thrown if this is not a handle //! to a @c NumberData type. //}}} operator double() const { return RefPtr::Raw()->Number(); } //! Implicit conversion of JSON string primitive @c Data //{{{ //! @exception Error will be thrown if this is not a handle //! to a @c StringData type. //}}} operator const std::string&() const { return RefPtr::Raw()->String(); } //@} //}}} //! @name Structured data type accessors //@{ //{{{ //! Return a member of a JSON Object or element of a JSON Array //{{{ //! @param key_or_index The Object @a key string or Array @a index number //! for the data being requested. //! //! If a string @a key is passed, then an Object lookup will be performed. //! If an integer @a index is passed then an Array access will be attempted. //! If any other type is passed then a compile time failure should occur. //! //! For Object member requests: //! //! @exception Error will be thrown if the member does not exist, //! or if this is not a handle to an @c ObjectData type. //! //! For Array element requests: //! 
//! @exception Error will be thrown if this is not a handle to an //! @c ArrayData type. //! @exception std::out_of_range will be thrown if the @a index is //! greater than the number of elements contained in the //! Array. //}}} template< typename U > HandleType< Data > operator[]( const U &key_or_index ) const { return (*RefPtr::Raw())[key_or_index]; } //@} //}}} //! @name Comparison operators //@{ //{{{ //! Return @c true if there is an object to dereference. //{{{ //! Because this specialised @c RefPtr has an implicit conversion //! to @c double operator, we can't just use the normal idiom for //! testing if it actually contains anything. //! //! For example: //! @code //! // This is what we'd usually do, and clearly says what it means, //! // but ISO C++ says these two possible resolutions are ambiguous: //! // RefPtr::operator!=(T*) //! // operator!=(double, long int) //! // Thanks to the historically silly definition of NULL in C++. //! if( handle != NULL ) //! ... //! //! // This resolves that, but eww ... //! if( handle != static_cast< HandleType >(NULL) ) //! ... //! //! // This also works, but the double negation trick isn't usually //! // something it's nice to scatter through user-facing code. //! if( !!handle ) //! ... //! //! // So we provide this easy-to-read-at-a-glance option instead. //! if( handle.IsNotNULL() ) //! ... //! @endcode //! //! So the only trick here now, is to not confuse this functionality //! with the operation of the @c Data::IsNull accessor for the JSON //! @c null literal primitive type. Note the distinction between //! @c NULL, and @c Null or @c null. //}}} bool IsNotNULL() const { return RefPtr::Raw() != NULL; } //@} //}}} }; //}}} //! @name Handle type //@{ //{{{ //! %Handle type for a @c Json::Data element. typedef HandleType< Data > Handle; //@} //}}} //! @name Data type query //@{ //{{{ //! Return the JSON @c #DataType of this element virtual DataType Type() const = 0; //@} //}}} //! @name Generic container operations //@{ //{{{ //! Return @c true if this is an object or array which contains no data //{{{ //! Primitive data types are never empty, not even the @c null type //! or an empty string, so they will always return @c false here. //! An 'empty' primitive type does not exist to be able to call this //! method on at all. //}}} virtual bool empty() const { return false; } //@} //}}} //! @name JSON object construction methods //@{ //{{{ //! Add a new @c null member to this Object virtual void AddMember( const std::string &name ) { throw Error( "%s::AddMember( %s ): not an Object type (%s)", EM_TYPEOF(*this), name.c_str(), JSONStr().c_str() ); } //! Add a new boolean member to this Object virtual void AddMember( const std::string &name, bool value ) { (void)value; throw Error( "%s::AddMember( %s ): not an Object type (%s)", EM_TYPEOF(*this), name.c_str(), JSONStr().c_str() ); } //! Add a new numeric member to this Object virtual void AddMember( const std::string &name, double value ) { (void)value; throw Error( "%s::AddMember( %s ): not an Object type (%s)", EM_TYPEOF(*this), name.c_str(), JSONStr().c_str() ); } //! Add a new numeric member to this Object virtual void AddMember( const std::string &name, int value ) { AddMember( name, double(value) ); } //! Add a new numeric member to this Object virtual void AddMember( const std::string &name, long value ) { AddMember( name, double(value) ); } //! Add a new numeric member to this Object virtual void AddMember( const std::string &name, long long value ) { AddMember( name, double(value) ); } //! 
Add a new numeric member to this Object virtual void AddMember( const std::string &name, unsigned int value ) { AddMember( name, double(value) ); } //! Add a new numeric member to this Object virtual void AddMember( const std::string &name, unsigned long value ) { AddMember( name, double(value) ); } //! Add a new numeric member to this Object virtual void AddMember( const std::string &name, unsigned long long value ) { AddMember( name, double(value) ); } //! Add a new string member to this Object virtual void AddMember( const std::string &name, const std::string &value ) { (void)value; throw Error( "%s::AddMember( %s ): not an Object type (%s)", EM_TYPEOF(*this), name.c_str(), JSONStr().c_str() ); } //! Add a new string member to this Object virtual void AddMember( const std::string &name, const char *value ) { AddMember( name, std::string(value) ); } //! Add a new object member to this Object //{{{ //! @return A handle to the newly created object which can then //! be used to add members to it. //}}} virtual Data::Handle AddObject( const std::string &name ) { throw Error( "%s::AddObject( %s ): not an Object type (%s)", EM_TYPEOF(*this), name.c_str(), JSONStr().c_str() ); } //! Add a new array member to this Object //{{{ //! @return A handle to the newly created array which can then //! be used to add elements to it. //}}} virtual Data::Handle AddArray( const std::string &name ) { throw Error( "%s::AddArray( %s ): not an Object type (%s)", EM_TYPEOF(*this), name.c_str(), JSONStr().c_str() ); } //@} //}}} //! @name JSON array construction methods //@{ //{{{ //! Append a new @c null element to this Array virtual void AddElement() { throw Error( "%s::AddElement(): not an Array type (%s)", EM_TYPEOF(*this), JSONStr().c_str() ); } //! Append a new boolean element to this Array virtual void AddElement( bool value ) { throw Error( "%s::AddElement( %s ): not an Array type (%s)", EM_TYPEOF(*this), value ? "true" : "false", JSONStr().c_str() ); } //! Append a new numeric element to this Array virtual void AddElement( double value ) { throw Error( "%s::AddElement( %g ): not an Array type (%s)", EM_TYPEOF(*this), value, JSONStr().c_str() ); } //! Append a new numeric element to this Array virtual void AddElement( long long value ) { throw Error( "%s::AddElement( %lld ): not an Array type (%s)", EM_TYPEOF(*this), value, JSONStr().c_str() ); } //! Append a new numeric element to this Array virtual void AddElement( long value ) { throw Error( "%s::AddElement( %ld ): not an Array type (%s)", EM_TYPEOF(*this), value, JSONStr().c_str() ); } //! Append a new numeric element to this Array virtual void AddElement( int value ) { AddElement( long(value) ); } //! Append a new numeric element to this Array virtual void AddElement( unsigned long long value ) { throw Error( "%s::AddElement( %llu ): not an Array type (%s)", EM_TYPEOF(*this), value, JSONStr().c_str() ); } //! Append a new numeric element to this Array virtual void AddElement( unsigned long value ) { throw Error( "%s::AddElement( %lu ): not an Array type (%s)", EM_TYPEOF(*this), value, JSONStr().c_str() ); } //! Append a new numeric element to this Array virtual void AddElement( unsigned int value ) { AddElement( static_cast(value) ); } //! Append a new string element to this Array virtual void AddElement( const std::string &value ) { throw Error( "%s::AddElement( %s ): not an Array type (%s)", EM_TYPEOF(*this), value.c_str(), JSONStr().c_str() ); } //! 
Append a new string element to this Array virtual void AddElement( const char *value ) { AddElement( std::string(value) ); } //! Append a new object element to this Array //{{{ //! @return A handle to the newly created object which can then //! be used to add members to it. //}}} virtual Data::Handle AddObject() { throw Error( "%s::AddObject(): not an Array type (%s)", EM_TYPEOF(*this), JSONStr().c_str() ); } //! Append a new array element to this Array //{{{ //! @return A handle to the newly created array which can then //! be used to add elements to it. //}}} virtual Data::Handle AddArray() { throw Error( "%s::AddArray(): not an Array type (%s)", EM_TYPEOF(*this), JSONStr().c_str() ); } //@} //}}} //! @name Primitive type accessors //@{ //{{{ //! Return @c true if this element is the @c null literal primitive type bool IsNull() const { return Type() == NullType; } //! @brief Return the boolean value of this element //! //! @exception Error will be thrown if this is not a boolean primitive type virtual bool IsTrue() const { throw Error( "%s is not a Boolean type (%s)", EM_TYPEOF(*this), JSONStr().c_str() ); } //! @brief Return the numeric value of this element //! //! @exception Error will be thrown if this is not a numeric primitive type virtual double Number() const { throw Error( "%s is not a Number type (%s)", EM_TYPEOF(*this), JSONStr().c_str() ); } //! @brief Return the string value of this element //! //! @exception Error will be thrown if this is not a string primitive type virtual const std::string &String() const { throw Error( "%s is not a String type (%s)", EM_TYPEOF(*this), JSONStr().c_str() ); } //! @brief Implicit conversion of numeric primitive @c %Data //! //! @exception Error will be thrown if this is not a numeric primitive type operator double() const { return Number(); } //! @brief Implicit conversion of string primitive @c %Data //! //! @exception Error will be thrown if this is not a string primitive type operator const std::string&() const { return String(); } //! Return the value of a numeric primitive as type @a T //{{{ //! RFC 7159 recommends that for interoperability an implementation //! should expect numeric primitives to have the precision and range //! of an IEEE 754 @c double precision floating point type. And JSON //! itself makes no distinction between integer and floating point //! numeric values, to it they are all just the same primitive type. //! //! However, in any real use, it is likely that values which are strictly //! always integers will be encoded and decoded. This method can be used //! for safe conversion of a JSON numeric primitive to any other numeric //! type which the software calling it requires. A compile time error //! will occur if the @c double type cannot be @c static_cast to type @a T, //! and a runtime exception will be thrown if this is not a JSON numeric //! primitive. //! //! @tparam T The desired numeric type. //! //! @note This method hides the normal @c RefCounted::As dynamic cast //! operator, since it has the same semantics and the narrowed //! scope of only applying this operation to numeric primitives //! is appropriate here. The base class method can still be //! called explicitly if needed by some specialised case though. //! //! @exception Error will be thrown if there is no data or this is not a //! numeric primitive type //}}} template< typename T > T As() const { return static_cast( Number() ); } //@} //}}} //! @name Object accessors //@{ //{{{ //! Return a primitive type value for an Object member //{{{ //! 
This method may be used to obtain a primitive type value for a member //! which may or may not exist in the object. If the requested member //! does not exist then a default value for it will be returned instead. //! //! @tparam T The type of data to return. //! //! @param key The name of the desired member. //! @param default_value The value to return if there is no member //! with the requested name in this object. //! //! If the member does exist, its value must be a compatible type to what //! has been requested. //! //! @exception Error will be thrown if this is not an Object, or if the //! member exists but is not a compatible type to @a T. //}}} template< typename T > T Get( const std::string &key, const T &default_value = T() ) const { if( Type() != ObjectType ) throw Error( "%s::Get<%s>( %s ) is not an Object type (%s)", EM_TYPEOF(*this), EM_TYPEOF(T), key.c_str(), JSONStr().c_str() ); Handle d = Get( key ); if( ! d ) return default_value; return static_cast(d); } //! Return a named member of a JSON Object //{{{ //! This method may be used to query for optional member data that //! may not always be present in a particular data structure. //! //! If this @c %Data element is not an object, or a member with the //! requested name does not exist, then a @c NULL handle will be //! returned. //}}} virtual Handle Get( const std::string &key ) const { (void)key; return NULL; } //! Return a named member of a JSON Object //{{{ //! This method may be used to retrieve the value of a known or //! required member of an object. //! //! @exception Error will be thrown if this @c %Data element is not //! an Object or if @a key is not a member of it. //}}} virtual Handle operator[]( const std::string &key ) const { throw Error( "%s::operator[%s]: not an Object type (%s)", EM_TYPEOF(*this), key.c_str(), JSONStr().c_str() ); } //! Return a named member of a JSON Object //{{{ //! This method may be used to retrieve the value of a known or //! required member of an object. //! //! @exception Error will be thrown if this @c %Data element is not //! an Object or if @a key is not a member of it. //}}} virtual Handle operator[]( const char *key ) const { return operator[]( std::string( key ) ); } //! @brief Populate a @a list with the names of all Object members //! //! @exception Error will be thrown if this is not an object type. virtual void GetMembers( MemberList &list ) const { (void)list; throw Error( "%s::GetMembers(): not an Object type (%s)", EM_TYPEOF(*this), JSONStr().c_str() ); } //@} //}}} //! @name Array accessors //@{ //{{{ //! Return a primitive type value for an Array element //{{{ //! This method may be used to obtain a primitive type value for an element //! which may or may not exist in the array. If the requested element does //! not exist then a default value for it will be returned instead. //! //! @tparam T The type of data to return. //! //! @param index The index of the desired element. //! @param default_value The value to return if there is no element //! at the requested index in this array. //! //! If the element does exist, its value must be a compatible type to what //! has been requested. //! //! @exception Error will be thrown if this is not an Array, or if the //! element exists but is not a compatible type to @a T. //! //! This method is probably less useful than the one which queries for //! optional object members, since requesting random array elements is //! not generally a very useful thing to do, but we include it for //! 
symmetry since there may be times when a hard error is undesirable //! if an expected element may not actually be present. //}}} template< typename T > T Get( unsigned long index, const T &default_value = T() ) const { //{{{ if( Type() != ArrayType ) throw Error( "%s::Get<%s>( %lu ) is not an Array type (%s)", EM_TYPEOF(*this), EM_TYPEOF(T), index, JSONStr().c_str() ); Handle d = Get( index ); if( ! d ) return default_value; return static_cast(d); } //}}} //! Return an element of a JSON Array //{{{ //! If this @c %Data element is not an array, or an element with the //! requested index does not exist, then a @c NULL handle will be //! returned. //! //! This method is probably less useful than the one which queries for //! optional object members, since requesting random array elements is //! not generally a very useful thing to do, but we include it for //! symmetry since there may be times when a hard error is undesirable //! if an expected element may not actually be present. //}}} virtual Handle Get( unsigned long index ) const { (void)index; return NULL; } //! Return an element of a JSON Array //{{{ //! This method may be used to retrieve the value of a known or //! required element of an array. //! //! @exception Error will be thrown if this @c %Data element is not //! an Array. //! @exception std::out_of_range will be thrown if the @a index is //! greater than the number of elements it contains. //}}} virtual Handle operator[]( unsigned long index ) const { throw Error( "%s::operator[%lu]: not an Array type (%s)", EM_TYPEOF(*this), index, JSONStr().c_str() ); } //! Return an element of a JSON Array //{{{ //! This method may be used to retrieve the value of a known or //! required element of an array. //! //! @exception Error will be thrown if this @c %Data element is not //! an Array. //! @exception std::out_of_range will be thrown if the @a index is //! greater than the number of elements it contains, or //! greater than @c ULONG_MAX. //}}} virtual Handle operator[]( unsigned long long index ) const { //{{{ #if ULONG_MAX != ULLONG_MAX using std::out_of_range; if( index > ULONG_MAX ) throw out_of_range( stringprintf("Json::Array[%llu] index > %lu", index, ULONG_MAX) ); #endif return operator[]( static_cast(index) ); } //}}} //! Return an element of a JSON Array //{{{ //! This method may be used to retrieve the value of a known or //! required element of an array. //! //! @exception Error will be thrown if this @c %Data element is not //! an Array. //! @exception std::out_of_range will be thrown if the @a index is //! greater than the number of elements it contains. //}}} virtual Handle operator[]( unsigned index ) const { return operator[]( static_cast(index) ); } //! Return an element of a JSON Array //{{{ //! This method may be used to retrieve the value of a known or //! required element of an array. //! //! @exception Error will be thrown if this @c %Data element is not //! an Array. //! @exception std::out_of_range will be thrown if the @a index is //! greater than the number of elements it contains, or //! less than 0. //}}} virtual Handle operator[]( long index ) const { //{{{ using std::out_of_range; if( index < 0 ) throw out_of_range( stringprintf("Json::Array[%ld] index < 0", index) ); return operator[]( static_cast(index) ); } //}}} //! Return an element of a JSON Array //{{{ //! This method may be used to retrieve the value of a known or //! required element of an array. //! //! @exception Error will be thrown if this @c %Data element is not //! an Array. //! 
@exception std::out_of_range will be thrown if the @a index is //! greater than the number of elements it contains, or //! less than 0, or is greater than @c LONG_MAX. //}}} virtual Handle operator[]( long long index ) const { //{{{ #if LONG_MAX != LLONG_MAX using std::out_of_range; if( index > LONG_MAX ) throw out_of_range( stringprintf("Json::Array[%lld] index > %ld", index, LONG_MAX) ); #endif return operator[]( long(index) ); } //}}} //! Return an element of a JSON Array //{{{ //! This method may be used to retrieve the value of a known or //! required element of an array. //! //! @exception Error will be thrown if this @c %Data element is not //! an Array. //! @exception std::out_of_range will be thrown if the @a index is //! greater than the number of elements it contains, or //! less than 0. //}}} virtual Handle operator[]( int index ) const { return operator[]( long(index) ); } //! @brief Return the number of elements in an Array //! //! @exception Error will be thrown if this is not an array type. virtual size_t GetArraySize() const { throw Error( "%s::GetArraySize(): not an Array type (%s)", EM_TYPEOF(*this), JSONStr().c_str() ); } //@} //}}} //! @name Output methods //@{ //{{{ //! Return a compact JSON encoded string of this data element virtual std::string JSONStr() const = 0; //@} //}}} }; //}}} //! @name Handle type //@{ //{{{ //! Handle type for a @c %Json parser instance. typedef Data::HandleType< Json > Handle; //@} //}}} private: //! Specialised container for the JSON literal values class EnumData : public Data { //{{{ private: //! JSON literal value type identifiers enum Value { Null, //!< The JSON literal @c null False, //!< The JSCON literal @c false True //!< The JSON literal @c true }; //! Parse a JSON literal value from @a data beginning at @a pos //{{{ //! This method expects to find the start of a JSON literal at @a pos. //! No whitespace or other characters are skipped before testing this. //! //! @param data The JSON text to parse. //! @param pos The byte position within @a data to begin parsing. //! Upon successful return, this will contain the position //! of the byte following the last character of the JSON //! literal value. In the event of failure (an exception //! being thrown) it will remain unchanged. //! //! @return The enumerated @c Value for the literal. //! //! @exception Error will be thrown if the data does not begin with a //! valid JSON literal value. //}}} static Value parse( const std::string &data, size_t &pos ) { //{{{ if( data.find("null", pos) == pos ) { pos += 4; return Null; } if( data.find("false", pos) == pos ) { pos += 5; return False; } if( data.find("true", pos) == pos ) { pos += 4; return True; } throw Error( "Invalid JSON, not a literal at position %zu in '%s'", pos, data.c_str() ); } //}}} //! The enumerated JSON literal value Value m_value; public: //! @name Constructors //@{ //{{{ //! Create a new EnumData instance containing @c null EnumData() : m_value( Null ) {} //! Create a new EnumData instance containing a boolean value EnumData( bool value ) : m_value( value ? True : False ) {} //! Parse a new literal from the @a data at @a pos EnumData( const std::string &data, size_t &pos ) : m_value( parse(data, pos) ) {} //@} //}}} //! @name Data type query //@{ //{{{ virtual DataType Type() const { switch( m_value ) { case Null: return NullType; case False: case True: return BoolType; } // This should never happen, it's just to hush a bogus compiler warning. throw Error( "Bad Json::EnumData value %d", m_value ); } //@} //}}} //! 
@name Primitive type accessors //@{ //{{{ virtual bool IsTrue() const { switch( m_value ) { case Null: break; case False: return false; case True: return true; } throw Error( "Json::EnumData(%d) is not a Boolean type", m_value ); } //@} //}}} //! @name Output methods //@{ //{{{ virtual std::string JSONStr() const { switch( m_value ) { case Null: return "null"; case False: return "false"; case True: return "true"; } // This should never happen, it's just to hush a bogus compiler warning. throw Error( "Bad Json::EnumData value %d", m_value ); } //@} //}}} }; //}}} //! Specialised container for JSON numeric values class NumberData : public Data { //{{{ private: //! The floating point numerical value double m_value; public: //! @name Constructors //@{ //{{{ //! Create a new NumberData instance containing @a value NumberData( double value ) : m_value( value ) {} //! Parse a new numeric value from the @a data at @a pos //{{{ //! This constructor expects to find the start of a JSON number value //! at @a pos. No whitespace or other characters are skipped before //! testing this. //! //! @param data The JSON text to parse. //! @param pos The byte position within @a data to begin parsing. //! Upon successful return, this will contain the position //! of the byte following the last character of the JSON //! numeric value. In the event of failure (an exception //! being thrown) it will remain unchanged. //! //! @exception Error will be thrown if the data does not begin with a //! valid JSON numeric value. //}}} NumberData( const std::string &data, size_t &pos ) { // Always use the C locale for strtod, since we always want the // radix character to be '.' even in locales where the decimal // point might be a comma instead. #if HAVE_NEWLOCALE static locale_t clocale = newlocale(LC_ALL_MASK, "C", NULL); #elif HAVE__CREATE_LOCALE static _locale_t clocale = _create_locale(LC_ALL, "C"); #endif const char *b = data.c_str() + pos; char *e; // This isn't quite right, it would allow the number to be given in hex, // which RFC 7159 doesn't permit, but that's probably not a problem unless // someone wants to use this code as a strict validator. We could always // check for the hex prefix if we want or need to be fussy about that. #if HAVE_STRTOD_L m_value = strtod_l( b, &e, clocale ); #elif HAVE__STRTOD_L m_value = _strtod_l( b, &e, clocale ); #else // Fall back to using strtod in the current locale. There's not much // else we can safely do here except advise the user (which is done // by the configure test) that they'll need to run this using the "C" // locale if their default locale treats decimal numbers differently // to that. (void)clocale; m_value = strtod( b, &e ); #endif if( b == e ) throw Error( "Invalid JSON, bad number conversion at position %zu in '%s'", pos, data.c_str() ); pos += size_t(e - b); } //@} //}}} //! @name Data type query //@{ //{{{ virtual DataType Type() const { return NumberType; } //@} //}}} //! @name Primitive type accessors //@{ //{{{ virtual double Number() const { return m_value; } //@} //}}} //! @name Output methods //@{ //{{{ virtual std::string JSONStr() const { return stringprintf( "%.12g", m_value ); } //@} //}}} }; //}}} // Forward declaration for the StringData friend class ObjectData; //! Specialised container for JSON string values class StringData : public Data { //{{{ // Allow ObjectData to use our parse() method friend class Json::ObjectData; private: //! The string content std::string m_value; //! Parse a JSON string value from @a data beginning at @a pos //{{{ //! 
This method expects to find the start of a JSON string at @a pos. //! No whitespace or other characters are skipped before testing this. //! //! @param data The JSON text to parse. //! @param pos The byte position within @a data to begin parsing, this //! is expected to be the first byte of the string itself, //! immediately following the opening quote character. //! Upon successful return, this will contain the position //! of the byte following the terminating quote of the JSON //! string value. In the event of failure (an exception //! being thrown) it will remain unchanged. //! @param context The context to use when reporting an error. This is just //! an informative string that has no internal meaning. We //! use this method for parsing object names too, so this //! lets us say a bit more explicitly exactly what operation //! has failed if it does. //! //! @return The raw string value with any JSON escaping undone. //! //! @exception Error will be thrown if no terminating quote can be found //! to mark the end of the string. //}}} static std::string parse( const std::string &data, size_t &pos, const char *context ) { //{{{ size_t b = pos; size_t e = pos; for(;;) { e = data.find('"', e); if( e == pos ) { // This is an empty "" string, we're all done. ++pos; return std::string(); } if( e == std::string::npos ) throw Error( "Invalid JSON, unterminated %s at position %zu in '%s'", context, pos, data.c_str() ); // Check if the quote we found is escaped. If there is an even number // of backslashes preceding it (and hence an odd number of characters // back to the first non-backslash character) then it is not. // If it is, then keep searching for an unescaped one. size_t n = data.find_last_not_of('\\', e - 1); // If it's backslashes all the way down, then only measure back to the // opening quote where this string started. if( n == std::string::npos || n < pos ) n = pos - 1; if( (e - n) % 2 ) break; ++e; } pos = e + 1; return Unescape( data.substr(b, e - b) ); } //}}} public: //! @name Constructors //@{ //{{{ //! Create a new StringData instance containing @a value StringData( const std::string &value ) : m_value( value ) {} //! Parse a new string from the @a data at @a pos StringData( const std::string &data, size_t &pos ) : m_value( parse(data, pos, "string") ) {} //@} //}}} //! @name Data type query //@{ //{{{ virtual DataType Type() const { return StringType; } //@} //}}} //! @name Primitive type accessors //@{ //{{{ virtual const std::string &String() const { return m_value; } //@} //}}} //! @name Output methods //@{ //{{{ virtual std::string JSONStr() const { return '"' + Escape(m_value) + '"'; } //@} //}}} }; //}}} //! Specialised container for JSON array structures class ArrayData : public Data { //{{{ private: //! Container type for JSON Array elements typedef std::vector< Data::Handle > Elements; //! The array of JSON elements Elements m_elements; public: //! @name Constructors //@{ //{{{ //! Create a new empty ArrayData instance ArrayData() {} //! Parse a new JSON array from the @a data at @a pos //{{{ //! This constructor expects to find the start of JSON array values //! at @a pos. Insignificant whitespace will be skipped both before //! and between the array element values. //! //! @param data The JSON text to parse. //! @param pos The byte position within @a data to begin parsing, this //! is expected to be the first byte after the opening '[' //! character of the array. //! Upon successful return, this will contain the position //! 
of the byte following the closing ']' of the JSON array //! structure. In the event of failure (an exception being //! thrown) it will likely point to the place where processing //! failed, but no guarantee about where it may point is made. //! //! @exception Error will be thrown if no closing ']' is found to mark //! the end of the array, or if any character other than ',' //! or insignificant whitespace is found between the element //! values. //! @exception Various other exceptions may be thrown if there is an //! error parsing the content of the array element types. //}}} ArrayData( const std::string &data, size_t &pos ) { //{{{ size_t n = skip_whitespace( data, pos ); if( n == std::string::npos ) throw Error("Invalid JSON, unexpected end of array after position %zu in '%s'", pos, data.c_str() ); if( data[n] == ']' ) { pos = n + 1; return; } for(;;) { m_elements.push_back( parse_value(data, pos) ); n = skip_whitespace( data, pos ); if( n == std::string::npos ) throw Error("Invalid JSON, unexpected end of array after position %zu in '%s'", pos, data.c_str() ); switch( data[n] ) { case ',': pos = n + 1; break; case ']': pos = n + 1; return; default: throw Error("Invalid JSON, unexpected character '%c' at position %zu in '%s'", data[n], n, data.c_str() ); } } } //}}} //@} //}}} //! @name Data type query //@{ //{{{ virtual DataType Type() const { return ArrayType; } //@} //}}} //! @name Generic container operations //@{ //{{{ virtual bool empty() const { return m_elements.empty(); } //@} //}}} //! @name JSON array construction methods //@{ //{{{ using Data::AddObject; //(const std::string &name) using Data::AddArray; //(const std::string &name) using Data::AddElement; //(int value), (unsigned value), (const char*) virtual void AddElement() { m_elements.push_back( new EnumData ); } virtual void AddElement( bool value ) { m_elements.push_back( new EnumData(value) ); } virtual void AddElement( double value ) { m_elements.push_back( new NumberData(value) ); } virtual void AddElement( long long value ) { AddElement( double(value) ); } virtual void AddElement( long value ) { AddElement( double(value) ); } virtual void AddElement( unsigned long long value ) { AddElement( double(value) ); } virtual void AddElement( unsigned long value ) { AddElement( double(value) ); } virtual void AddElement( const std::string &value ) { m_elements.push_back( new StringData(value) ); } virtual Data::Handle AddObject() { Handle d = new ObjectData; m_elements.push_back( d ); return d; } virtual Data::Handle AddArray() { Handle d = new ArrayData; m_elements.push_back( d ); return d; } //@} //}}} //! @name Array accessors //@{ //{{{ // Don't hide the overloads that we don't implement here using Data::Get; //(const std::string&); using Data::operator[]; //(const std::string&); virtual Handle Get( unsigned long index ) const { if( index < m_elements.size() ) return m_elements[index]; return NULL; } virtual Handle operator[]( unsigned long index ) const { using std::out_of_range; if( index < m_elements.size() ) return m_elements[index]; throw out_of_range( stringprintf("Json::Array[%lu] index out of bounds" " (array has %zu elements)", index, m_elements.size()) ); } virtual size_t GetArraySize() const { return m_elements.size(); } //@} //}}} //! 
@name Output methods //@{ //{{{ virtual std::string JSONStr() const { if( m_elements.empty() ) return "[]"; std::string s("[ "); const char *sep = ""; for( Elements::const_iterator i = m_elements.begin(), e = m_elements.end(); i != e; ++i ) { s.append( sep ); s.append( (*i)->JSONStr() ); sep = ", "; } return s + " ]"; } //@} //}}} }; //}}} //! Specialised container for JSON object structures class ObjectData : public Data { //{{{ private: //! Container type for JSON Object members #ifdef BB_HASH_JSON_OBJECT_MEMBERS typedef was_tr1::unordered_map< std::string, Data::Handle > Members; #else typedef std::map< std::string, Data::Handle > Members; #endif //! The members of this JSON Object Members m_members; //! Parse a JSON object member name from @a data beginning at @a pos //{{{ //! This method expects to find the start of a JSON member name string //! immediately after skipping any initial insignificant whitespace. //! //! @param data The JSON text to parse. //! @param pos The byte position within @a data to begin parsing. //! Upon successful return, this will contain the position //! of the byte following the last character of the JSON //! member name. In the event of failure (an exception //! being thrown) it will likely point to the place where //! processing failed, but no guarantee about where it may //! point is made. //! //! @return The raw string member name with any JSON escaping undone. //! //! @exception Error will be thrown if the data does not begin with an //! object member name after skipping insignificant whitespace. //}}} std::string parse_name( const std::string &data, size_t &pos ) { //{{{ size_t b = skip_whitespace( data, pos ); if( b == std::string::npos || data[b] != '"' ) throw Error( "Invalid JSON, expecting object member name at position %zu in '%s'", pos, data.c_str() ); std::string name = StringData::parse( data, ++b, "object member name" ); size_t s = skip_whitespace( data, b ); if( s == std::string::npos || data[s] != ':' ) throw Error( "Invalid JSON, no object name separator at position %zu in '%s'", b, data.c_str() ); pos = s + 1; return name; } //}}} public: //! @name Constructors //@{ //{{{ //! Create a new empty ObjectData instance ObjectData() {} //! Parse a new JSON object from the @a data at @a pos //{{{ //! This constructor expects to find the start of JSON object members //! at @a pos. Insignificant whitespace will be skipped both before //! and between the object member values. //! //! @param data The JSON text to parse. //! @param pos The byte position within @a data to begin parsing, this //! is expected to be the first byte after the opening '{' //! character of the object. //! Upon successful return, this will contain the position //! of the byte following the closing '}' of the JSON object //! structure. In the event of failure (an exception being //! thrown) it will likely point to the place where processing //! failed, but no guarantee about where it may point is made. //! //! @exception Error will be thrown if no closing '}' is found to mark //! the end of the object, or if any character other than ',' //! or insignificant whitespace is found between the member //! name/value pairs. //! @exception Various other exceptions may be thrown if there is an //! error parsing the content of the array element types. 
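            //!
            //! For illustration (the input text here is hypothetical): given the
            //! data @c {"a":1,"b":[true,null]} this constructor is entered with
            //! @a pos indexing the byte just after the opening '{', and on
            //! success it returns with @a pos indexing the byte just past the
            //! closing '}'.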
//}}} ObjectData( const std::string &data, size_t &pos ) { //{{{ size_t n = skip_whitespace( data, pos ); if( n == std::string::npos ) throw Error("Invalid JSON, unexpected end of object after position %zu in '%s'", pos, data.c_str() ); if( data[n] == '}' ) { pos = n + 1; return; } for(;;) { std::string name = parse_name( data, pos ); Data::Handle v = parse_value( data, pos ); m_members[ name ] = v; n = skip_whitespace( data, pos ); if( n == std::string::npos ) throw Error("Invalid JSON, unexpected end of object after position %zu in '%s'", pos, data.c_str() ); switch( data[n] ) { case ',': pos = n + 1; break; case '}': pos = n + 1; return; default: throw Error("Invalid JSON, unexpected character '%c' at position %zu in '%s'", data[n], n, data.c_str() ); } } } //}}} //@} //}}} //! @name Data type query //@{ //{{{ virtual DataType Type() const { return ObjectType; } //@} //}}} //! @name Generic container operations //@{ //{{{ virtual bool empty() const { return m_members.empty(); } //@} //}}} //! @name JSON object construction methods //@{ //{{{ using Data::AddObject; //(void) using Data::AddArray; //(void) using Data::AddMember; //({unsigned,}{int, long}), (const char*) virtual void AddMember( const std::string &name ) { m_members[ name ] = new EnumData; } virtual void AddMember( const std::string &name, bool value ) { m_members[ name ] = new EnumData( value ); } virtual void AddMember( const std::string &name, double value ) { m_members[ name ] = new NumberData( value ); } virtual void AddMember( const std::string &name, const std::string &value ) { m_members[ name ] = new StringData( value ); } virtual Data::Handle AddObject( const std::string &name ) { return m_members[ name ] = new ObjectData; } virtual Data::Handle AddArray( const std::string &name ) { return m_members[ name ] = new ArrayData; } //@} //}}} //! @name Object accessors //@{ //{{{ // Don't hide the overloads that we don't implement here using Data::Get; //(unsigned); using Data::operator[]; //(unsigned); virtual Handle Get( const std::string &key ) const { Members::const_iterator i = m_members.find( key ); if( i != m_members.end() ) return i->second; return NULL; } virtual Handle operator[]( const std::string &key ) const { Members::const_iterator i = m_members.find( key ); if( i != m_members.end() ) return i->second; throw Error( "Json::Object[%s] no such member", key.c_str() ); } virtual void GetMembers( MemberList &list ) const { for( Members::const_iterator i = m_members.begin(), e = m_members.end(); i != e; ++i ) list.push_back( i->first ); } //@} //}}} //! @name Output methods //@{ //{{{ virtual std::string JSONStr() const { if( m_members.empty() ) return "{}"; std::string s("{ "); const char *sep = ""; for( Members::const_iterator i = m_members.begin(), e = m_members.end(); i != e; ++i ) { s.append( sep ); s.append( 1, '"' ); s.append( Escape(i->first) ); s.append( "\": " ); s.append( i->second->JSONStr() ); sep = ", "; } return s + " }"; } //@} //}}} }; //}}} //! @name JSON structure parsing //@{ //{{{ //! Return the position of the first non-whitespace byte in @a data after @a pos //{{{ //! If there is no character that is not insignificant whitespace after //! @a pos, then @c std::string::npos will be returned. //}}} static size_t skip_whitespace( const std::string &data, size_t pos ) { //{{{ return data.find_first_not_of(" \t\n\r", pos); } //}}} //! Parse a single JSON value from @a data beginning at @a pos //{{{ //! This method expects to find the start of a JSON primitive or structural //! 
type immediately after skipping any initial insignificant whitespace. //! //! @param data The JSON text to parse. //! @param pos The byte position within @a data to begin parsing. //! Upon successful return, this will contain the position //! of the byte following the last character of the JSON //! value element. In the event of failure (an exception //! being thrown) it will likely point to the place where //! processing failed, but no guarantee about where it may //! point is made. //! //! @return A handle to the @c Json::Data for this value. //! //! @exception Error will be thrown if the data does not begin with a //! JSON data type after skipping insignificant whitespace. //! @exception Various other exceptions may be thrown if there is an //! error parsing the content of the JSON data type. //}}} static Data::Handle parse_value( const std::string &data, size_t &pos ) { //{{{ size_t b = skip_whitespace( data, pos ); if( b == std::string::npos ) throw Error( "Invalid JSON, expecting value at position %zu in '%s'", pos, data.c_str() ); pos = b; EM_TRY_PUSH_DIAGNOSTIC_IGNORE("-Wgnu-case-range") switch( data[pos] ) { case '{': return new ObjectData( data, ++pos ); case '[': return new ArrayData( data, ++pos ); case '"': return new StringData( data, ++pos ); case '-': case '0' ... '9': return new NumberData( data, pos ); case 'f': case 'n': case 't': return new EnumData( data, pos ); default: throw Error( "Invalid JSON value at position %zu in '%s'", pos, data.c_str() ); } EM_POP_DIAGNOSTIC } //}}} //! Parse the root of a JSON @a data string, starting at byte @a pos //{{{ //! This method expects to find the start of an unnamed Object or Array //! immediately after skipping any initial insignificant whitespace. //! //! @param data The JSON text to parse. //! @param pos The byte position within @a data to begin parsing. //! Upon successful return, this will contain the position //! of the byte following the last character of the JSON //! structure. In the event of failure (an exception being //! thrown) it will likely point to the place where processing //! failed, but no guarantee about where it may point is made. //! //! @return A handle to the root object or array. A NULL handle will //! be returned if @a data is an empty string or contains only //! insignificant whitespace. //! //! @exception Error will be thrown if the data does not begin with //! a valid JSON value and is not empty after skipping //! insignificant whitespace. //! @exception Various other exceptions may be thrown if there is an //! error parsing the content of the JSON structure. //}}} static Data::Handle parse_root( const std::string &data, size_t &pos ) { //{{{ pos = skip_whitespace( data, pos ); #ifdef STRICT_RFC4627_COMPATIBILITY if( pos != std::string::npos ) { switch( data[pos] ) { case '{': return new ObjectData( data, ++pos ); case '[': return new ArrayData( data, ++pos ); default: throw Error( "Invalid JSON at position %zu in '%s'", pos, data.c_str() ); } } return NULL; #else // RFC 7159 mode if( pos == std::string::npos ) return NULL; #ifndef JSON_REJECT_TRAILING_JUNK return parse_value( data, pos ); #else // Note: enabling this mode will break the Decode() behaviour of // returning the number of bytes consumed. RFC 7159 complicates // the decision about what is the Right Thing To Do when there is // trailing junk, because compared to the RFC 4627 behaviour some // strings of JSON text can be parsed quite differently. 
// // For example: "foo": 1 } // is simply invalid to RFC 4627 as the opening brace is missing, // but in RFC 7159 mode, it is the valid primitive string "foo", // and the rest is trailing junk. // // Applications using RFC 7159 mode that care about detecting that // sort of error are probably still best off NOT enabling this and // simply checking if the type of the root was an object (or array) // if that is what they require. Data::Handle v = parse_value( data, pos ); size_t n = skip_whitespace( data, pos ); if( n == std::string::npos ) return v; throw Error( "Invalid JSON, unexpected character '%c' at position %zu in '%s'", data[n], n, data.c_str() ); #endif #endif } //}}} //! Parse the structure of a JSON @a data string static Data::Handle parse( const std::string &data ) { //{{{ size_t pos = 0; return parse_root( data, pos ); } //}}} //@} //}}} //! The unnamed root Object or Array structure Data::Handle m_root; public: //! @name Constructors //@{ //{{{ //! Default constructor for a new empty parser Json() {} //! Construct a new instance, parsing the JSON @a data string //{{{ //! If there is trailing content after a valid JSON structure then it //! will simply be ignored. //! //! @exception Various exceptions may be thrown if the initial part //! of the @a data string is not a valid JSON structure. //}}} Json( const std::string &data ) : m_root( parse(data) ) {} //! Construct a new instance, parsing the JSON @a data string //{{{ //! If there is trailing content after a valid JSON structure then it //! will simply be ignored. //! //! This constructor will never throw. Instead, if there is an error, //! then a description of it will be returned in the @a error parameter. //! If an error is returned, then the content of this parser instance //! is undefined and no attempt should be made to use it. //! //! If no error occurs, then the content of @a error will be untouched. //! Sane users will generally want to ensure it is empty before this //! method is called, but that is not a hard requirement. If an error //! occurs the content will be replaced, not appended to. //}}} Json( const std::string &data, std::string &error ) { Decode( data, error ); } //@} //}}} //! @name Generic container operations //@{ //{{{ //! Erases all data currently held in this parser instance void clear() { m_root = NULL; } //! Return @c true if this parser contains no data //{{{ //! It will still return @c false if it contains only a root object //! or array but that structure is currently empty. //}}} bool empty() const { return ! m_root ? true : m_root->empty(); } //@} //}}} //! @name Initialiser methods //@{ //{{{ //! Decode a new JSON @a data string //{{{ //! The existing content of this parser, if any, will be replaced by //! the new data. //! //! @return The number of bytes consumed from @a data. This may be //! less than the size of @a data if there is trailing content //! that is not part of the JSON root Object or Array. For a //! returned value of @a n, data[n] will be //! the first byte of trailing non-JSON data in the string. //! //! @exception Various exceptions may be thrown if the initial part //! of the @a data string is not a valid JSON structure. //}}} size_t Decode( const std::string &data ) { //{{{ size_t pos = 0; m_root = parse_root( data, pos ); return pos; } //}}} //! Decode a new JSON @a data string //{{{ //! The existing content of this parser, if any, will be replaced by //! the new data. //! //! This method will never throw. Instead, if there is an error, then //! 
a description of it will be returned in the @a error parameter. //! If an error is returned, then the content of this parser instance //! (and the return value of this method) is undefined and no attempt //! should be made to use them. //! //! If no error occurs, then the content of @a error will be untouched. //! Sane users will generally want to ensure it is empty before this //! method is called, but that is not a hard requirement. If an error //! occurs the content will be replaced, not appended to. //! //! @return The number of bytes consumed from @a data. This may be //! less than the size of @a data if there is trailing content //! that is not part of the JSON root Object or Array. For a //! returned value of @a n, data[n] will be //! the first byte of trailing non-JSON data in the string. //}}} size_t Decode( const std::string &data, std::string &error ) { //{{{ size_t pos = 0; try { m_root = parse_root( data, pos ); } catch( const abi::__forced_unwind& ) { throw; } catch( const std::exception &e ) { error = e.what(); } catch( ... ) { error = "Unknown exception"; } return pos; } //}}} //! Create a JSON object structure //{{{ //! This method will destroy any existing data that may be contained in //! this parser instance, and return a handle to a new Object that can //! be populated with members. //}}} Data::Handle NewObject() { //{{{ m_root = new ObjectData; return m_root; } //}}} //! Create a JSON array structure //{{{ //! This method will destroy any existing data that may be contained in //! this parser instance, and return a handle to a new Array that can //! be populated with elements. //}}} Data::Handle NewArray() { //{{{ m_root = new ArrayData; return m_root; } //}}} //@} //}}} //! @name JSON object construction methods //@{ //{{{ //! Add a new @c null member to the root Object void AddMember( const std::string &name ) { //{{{ if( ! m_root ) throw Error( "Json::AddMember( %s ): no root Object to add to", name.c_str() ); m_root->AddMember( name ); } //}}} //! Add a new member to the root Object template< typename T > void AddMember( const std::string &name, T value ) { //{{{ if( ! m_root ) throw Error( "Json::AddMember( %s ): no root Object to add to", name.c_str() ); m_root->AddMember( name, value ); } //}}} //! Add a new object member to the root Object //{{{ //! @return A handle to the newly created object which can then //! be used to add members to it. //}}} Data::Handle AddObject( const std::string &name ) { //{{{ if( ! m_root ) throw Error( "Json::AddObject( %s ): no root Object to add to", name.c_str() ); return m_root->AddObject( name ); } //}}} //! Add a new array member to the root Object //{{{ //! @return A handle to the newly created array which can then //! be used to add elements to it. //}}} Data::Handle AddArray( const std::string &name ) { //{{{ if( ! m_root ) throw Error( "Json::AddArray( %s ): no root Object to add to", name.c_str() ); return m_root->AddArray( name ); } //}}} //@} //}}} //! @name JSON array construction methods //@{ //{{{ //! Append a new @c null element to the root Array void AddElement() { //{{{ if( ! m_root ) throw Error( "Json::AddElement(): no root Array to add to" ); m_root->AddElement(); } //}}} //! Append a new element to the root Array template< typename T > void AddElement( T value ) { //{{{ if( ! m_root ) throw Error( "Json::AddElement( %s ): no root Array to add to", EM_TYPEOF(T) ); m_root->AddElement( value ); } //}}} //! Append a new object element to this Array //{{{ //! 
@return A handle to the newly created object which can then //! be used to add members to it. //}}} Data::Handle AddObject() { //{{{ if( ! m_root ) throw Error( "Json::AddElement(): no root Array to add to" ); return m_root->AddObject(); } //}}} //! Append a new array element to this Array //{{{ //! @return A handle to the newly created array which can then //! be used to add elements to it. //}}} Data::Handle AddArray() { //{{{ if( ! m_root ) throw Error( "Json::AddElement(): no root Array to add to" ); return m_root->AddArray(); } //}}} //@} //}}} //! @name Root accessors //@{ //{{{ //! Return the @c #DataType of the root JSON value //{{{ //! If the parser is currently empty it will return the @c #NullType. //! //! When built with @c STRICT_RFC4627_COMPATIBILITY defined, the only //! (other) valid root types are @c Object or @c Array structured //! data, but RFC 7159 allows valid JSON text to consist of any //! JSON value including just a single primitive type. //}}} DataType RootType() const { return ! m_root ? NullType : m_root->Type(); } //! Return the value of the JSON text root //{{{ //! If the parser is currently empty it will return a @c NULL handle //! (not to be confused with a value of the primitive @c #NullType). //! //! When built with @c STRICT_RFC4627_COMPATIBILITY defined, the only //! (other) valid root values are @c Object or @c Array structured //! data, but RFC 7159 allows valid JSON text to consist of any //! JSON value including just a single primitive type. //}}} const Data::Handle &GetRoot() const { return m_root; } //@} //}}} //! @name Primitive type accessors //@{ //{{{ //! Return @c true if the only data is a @c null literal primitive type // //! @exception Error will be thrown if there is no data bool IsNull() const { if( ! m_root ) throw Error( "Json::IsNull: no data" ); return m_root->IsNull(); } //! Return @c true or @c false if the only data is a boolean primitive // //! @exception Error will be thrown if there is no data or this is not a //! boolean primitive type bool IsTrue() const { if( ! m_root ) throw Error( "Json::IsTrue: no data" ); return m_root->IsTrue(); } //! Return the value if the only data is a numeric primitive // //! @exception Error will be thrown if there is no data or this is not a //! numeric primitive type double Number() const { if( ! m_root ) throw Error( "Json::Number: no data" ); return m_root->Number(); } //! Return the value if the only data is a string primitive // //! @exception Error will be thrown if there is no data or this is not a //! string primitive type const std::string &String() const { if( ! m_root ) throw Error( "Json::String: no data" ); return m_root->String(); } //! Implicit conversion of numeric primitive data // //! @exception Error will be thrown if there is no data or this is not a //! numeric primitive type operator double() const { return Number(); } //! Implicit conversion of string primitive data // //! @exception Error will be thrown if there is no data or this is not a //! string primitive type operator const std::string&() const { return String(); } //! Return the value of a numeric primitive as type @a T //{{{ //! RFC 7159 recommends that for interoperability an implementation //! should expect numeric primitives to have the precision and range //! of an IEEE 754 @c double precision floating point type. And JSON //! itself makes no distinction between integer and floating point //! numeric values, to it they are all just the same primitive type. //! //! 
However, in any real use, it is likely that values which are strictly //! always integers will be encoded and decoded. This method can be used //! for safe conversion of a JSON numeric primitive to any other numeric //! type which the software calling it requires. A compile time error //! will occur if the @c double type cannot be @c static_cast to type @a T, //! and a runtime exception will be thrown if this is not a JSON numeric //! primitive. //! //! @tparam T The desired numeric type. //! //! @note This method hides the normal @c RefCounted::As dynamic cast //! operator, since it has the same semantics and the narrowed //! scope of only applying this operation to numeric primitives //! is appropriate here. The base class method can still be //! called explicitly if needed by some specialised case though. //! //! @exception Error will be thrown if there is no data or this is not a //! numeric primitive type //}}} template< typename T > T As() const { return static_cast( Number() ); } //@} //}}} //! @name Object accessors //@{ //{{{ //! Return a primitive type value for an Object member //{{{ //! This method may be used to obtain a primitive type value for a member //! which may or may not exist in the object. If the requested member //! does not exist then a default value for it will be returned instead. //! //! @tparam T The type of data to return. //! //! @param key The name of the desired member. //! @param default_value The value to return if there is no member //! with the requested name in this object. //! //! If the member does exist, its value must be a compatible type to what //! has been requested. //! //! @exception Error will be thrown if this is not an Object, or if the //! member exists but is not a compatible type to @a T. //}}} template< typename T > T Get( const std::string &key, const T &default_value = T() ) const { if( ! m_root ) return default_value; return m_root->Get( key, default_value ); } //! Return the value of the Object member named by @a key //{{{ //! This method may be used to query for optional member data that //! may not always be present in a particular data structure. //! //! If the root structure is not an object, or a member with the //! requested name does not exist, then a @c NULL handle will be //! returned. //}}} Data::Handle Get( const std::string &key ) const { //{{{ if( ! m_root ) return NULL; return m_root->Get( key ); } //}}} //! Return the value of the Object member named by @a key //{{{ //! This method may be used to retrieve the value of a known or //! required member of an object. //! //! @exception Error will be thrown if the member does not exist, //! or the root structure is not an object. //}}} Data::Handle operator[]( const std::string &key ) const { //{{{ if( ! m_root ) throw Error( "Json::operator[%s]: no data to access", key.c_str() ); return m_root[key]; } //}}} //! Return the value of the Object member named by @a key //{{{ //! This method may be used to retrieve the value of a known or //! required member of an object. //! //! @exception Error will be thrown if the member does not exist, //! or the root structure is not an object. //}}} Data::Handle operator[]( const char *key ) const { return operator[]( std::string( key ) ); } //! Populate a @a list with the names of all Object members //{{{ //! @exception Error will be thrown if the root structure is not //! an object. //}}} void GetMembers( MemberList &list ) const { //{{{ if( ! 
m_root ) throw Error( "Json::GetMembers(): no data to access" ); m_root->GetMembers( list ); } //}}} //@} //}}} //! @name Array accessors //@{ //{{{ //! Return a primitive type value for an Array element //{{{ //! This method may be used to obtain a primitive type value for an element //! which may or may not exist in the array. If the requested element does //! not exist then a default value for it will be returned instead. //! //! @tparam T The type of data to return. //! //! @param index The index of the desired element. //! @param default_value The value to return if there is no element //! at the requested index in this array. //! //! If the element does exist, its value must be a compatible type to what //! has been requested. //! //! @exception Error will be thrown if this is not an Array, or if the //! element exists but is not a compatible type to @a T. //! //! This method is probably less useful than the one which queries for //! optional object members, since requesting random array elements is //! not generally a very useful thing to do, but we include it for //! symmetry since there may be times when a hard error is undesirable //! if an expected element may not actually be present. //}}} template< typename T > T Get( unsigned long index, const T &default_value = T() ) const { if( ! m_root ) return default_value; return m_root->Get( index, default_value ); } //! Return the value of the Array element at @a index //{{{ //! If the root structure is not an array, or an element with the //! requested index does not exist, then a @c NULL handle will be //! returned. //! //! This method is probably less useful than the one which queries for //! optional object members, since requesting random array elements is //! not generally a very useful thing to do, but we include it for //! symmetry since there may be times when a hard error is undesirable //! if an expected element may not actually be present. //}}} Data::Handle Get( unsigned long index ) const { //{{{ if( ! m_root ) return NULL; return m_root->Get( index ); } //}}} //! Return the value of the Array element at @a index //{{{ //! This method may be used to retrieve the value of a known or //! required element of an array. //! //! @exception Error will be thrown if the root structure is not //! an array. //! @exception std::out_of_range will be thrown if the @a index is //! greater than the number of elements it contains. //}}} Data::Handle operator[]( unsigned long index ) const { //{{{ if( ! m_root ) throw Error( "Json::operator[%lu]: no data to access", index ); return m_root[index]; } //}}} //! Return the value of the Array element at @a index //{{{ //! This method may be used to retrieve the value of a known or //! required element of an array. //! //! @exception Error will be thrown if the root structure is not //! an array. //! @exception std::out_of_range will be thrown if the @a index is //! greater than the number of elements it contains, or //! greater than @c ULONG_MAX. //}}} Data::Handle operator[]( unsigned long long index ) const { //{{{ #if ULONG_MAX != ULLONG_MAX using std::out_of_range; if( index > ULONG_MAX ) throw out_of_range( stringprintf("Json::Array[%llu] index > %lu", index, ULONG_MAX) ); #endif return operator[]( static_cast(index) ); } //}}} //! Return the value of the Array element at @a index //{{{ //! This method may be used to retrieve the value of a known or //! required element of an array. //! //! @exception Error will be thrown if the root structure is not //! an array. //! 
@exception std::out_of_range will be thrown if the @a index is //! greater than the number of elements it contains. //}}} Data::Handle operator[]( unsigned index ) const { return operator[]( static_cast(index) ); } //! Return the value of the Array element at @a index //{{{ //! This method may be used to retrieve the value of a known or //! required element of an array. //! //! @exception Error will be thrown if the root structure is not //! an array. //! @exception std::out_of_range will be thrown if the @a index is //! greater than the number of elements it contains, or //! less than 0. //}}} Data::Handle operator[]( long index ) const { //{{{ using std::out_of_range; if( index < 0 ) throw out_of_range( stringprintf("Json::Array[%ld] index < 0", index) ); return operator[]( static_cast(index) ); } //}}} //! Return the value of the Array element at @a index //{{{ //! This method may be used to retrieve the value of a known or //! required element of an array. //! //! @exception Error will be thrown if the root structure is not //! an array. //! @exception std::out_of_range will be thrown if the @a index is //! greater than the number of elements it contains, or //! less than 0, or is greater than @c LONG_MAX. //}}} Data::Handle operator[]( long long index ) const { //{{{ #if LONG_MAX != LLONG_MAX using std::out_of_range; if( index > LONG_MAX ) throw out_of_range( stringprintf("Json::Array[%lld] index > %ld", index, LONG_MAX) ); #endif return operator[]( long(index) ); } //}}} //! Return the value of the Array element at @a index //{{{ //! This method may be used to retrieve the value of a known or //! required element of an array. //! //! @exception Error will be thrown if the root structure is not //! an array. //! @exception std::out_of_range will be thrown if the @a index is //! greater than the number of elements it contains, or //! less than 0. //}}} Data::Handle operator[]( int index ) const { return operator[]( long(index) ); } //! Return the number of elements in an Array //{{{ //! @exception Error will be thrown if the root structure is not //! an array. //}}} size_t GetArraySize() const { //{{{ if( ! m_root ) throw Error( "Json::GetArraySize(): no data to access" ); return m_root->GetArraySize(); } //}}} //@} //}}} //! @name Output methods //@{ //{{{ //! Return a JSON encoded string of the structures in this parser //{{{ //! @note The string returned may not be bit identical to the one //! that was originally parsed. The ordering of members in an //! Object may not be preserved and insignificant whitespace //! may be different. //! //! @exception Error will be thrown if there is no data to output. //}}} std::string JSONStr() const { if( ! m_root ) throw Error("JSONStr(): no data to output"); return m_root->JSONStr(); } //@} //}}} }; //}}} #ifndef __DOXYGEN_PASS__ // Specialisations for boolean type data template<> inline bool Json::Data::Get( const std::string &key, const bool &default_value ) const { //{{{ if( Type() != ObjectType ) throw Error( "%s::Get( %s ) is not an Object type (%s)", EM_TYPEOF(*this), key.c_str(), JSONStr().c_str() ); Handle d = Get( key ); if( ! d ) return default_value; return d->IsTrue(); } //}}} template<> inline bool Json::Data::Get( unsigned long index, const bool &default_value ) const { //{{{ if( Type() != ArrayType ) throw Error( "%s::Get( %lu ) is not an Array type (%s)", EM_TYPEOF(*this), index, JSONStr().c_str() ); Handle d = Get( index ); if( ! 
d ) return default_value; return d->IsTrue(); } //}}} #endif //!@} } // BitB namespace #endif // _BB_JSON_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/log.h0000644000000000000000000003310414136173163015145 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2003 - 2021, Ron // // You must include bit-babbler/impl/log.h exactly once in some translation unit // of any program using the Log() functions. #ifndef _BB_LOG_H #define _BB_LOG_H #include #ifdef _REENTRANT #include #if HAVE_PTHREAD_SET_NAME_NP #include #endif #endif #if EM_PLATFORM_POSIX #include #else // Stub definitions of the syslog functionality we use. // The openlog and vsyslog functions will need to be implemented suitably // for platforms that don't provide them natively. // Option flag for openlog #define LOG_PID 0x01 // Log the Process ID with each message // Syslog facility #define LOG_DAEMON (3<<3) // system daemons // Syslog priority #define LOG_NOTICE 5 // normal but significant condition void openlog(const char *ident, int option, int facility); void syslog(int priority, const char *format, ...); void vsyslog(int priority, const char *format, va_list ap); #endif #if !HAVE_DECL_LOG_MAKEPRI // This is missing on OpenBSD 6.1, it is fairly widely supported // elsewhere, but isn't actually required by POSIX.1-2008 (SuSv4). #define LOG_MAKEPRI(facility, priority) ((facility) | (priority)) #endif #include #include #if EM_USE_GETTEXT #include #define _(x) gettext(x) #define P_(x,y,n) ngettext(x,y,n) #else #define _(x) x #define P_(singular, plural, number) ( number == 1 ? singular : plural ) #endif #define BB_CATCH_STD( LogLevel, Message, ... ) \ catch( const std::exception &e ) \ { \ BitB::Log< LogLevel >( "%s: %s\n", Message, e.what() ); \ __VA_ARGS__ \ } \ catch( ... ) \ { \ BitB::Log< LogLevel >( "%s\n", Message ); \ __VA_ARGS__ \ } #define BB_CATCH_ALL( LogLevel, Message, ... ) \ catch( const abi::__forced_unwind& ) \ { \ BitB::Log< LogLevel >( "%s: thread cancelled\n", Message ); \ __VA_ARGS__ \ throw; \ } \ BB_CATCH_STD( LogLevel, Message, __VA_ARGS__ ) namespace BitB { //! @name String length functions //! Overloaded for generic template friendliness. //@{ //{{{ //! Return the (string) length of a @c char. inline size_t stringlength( char ) { return 1; } //! Return the length of a @c NULL terminated @c char* string. inline size_t stringlength( const char *s ) { return ( s ) ? strlen( s ) : 0; } //! Return the (string) length of a @c wchar_t. inline size_t stringlength( wchar_t ) { return 1; } //! Return the length of a @c NULL terminated @c wchar_t* string. inline size_t stringlength( const wchar_t *s ) { return ( s ) ? wcslen( s ) : 0; } //! Return the length of a string type (with a @c size() method). template< typename S > BB_PURE size_t stringlength( const S &s ) { return s.size(); } //@} //}}} //! Test if @a s starts with the string @a c //{{{ //! @param c The substring to match. It may be a: //! - @c NULL terminated @c char* or @c wchar_t* string //! - @c std::basic_string compatible type. //! @param s The string to match @a c in. Must be a @c std::basic_string //! compatible type. //! //! @return @c true if @a s starts with the string @a c //}}} template< typename C, typename S > bool StartsWith( C c, const S &s ) { //{{{ size_t n = stringlength(c); if( s.size() < n ) return false; return s.compare( 0, n, c ) == 0; } //}}} //! Returns all characters after the first occurrence of @a c, or empty if @a c is not in @a s. //{{{ //! 
@param c The character or substring to match. It may be a: //! - single @c char or @c wchar_t type //! - @c NULL terminated @c char* or @c wchar_t* string //! - @c std::basic_string compatible type. //! @param s The string to match @a c in. Must be a @c std::basic_string //! compatible type. //! //! @return A string of type @a S, containing all characters after the first //! occurrence of @a c, or an empty string if @a c is not in @a s. //}}} template< typename C, typename S > S afterfirst( C c, const S &s ) { //{{{ typename S::size_type n = s.find( c ); return ( n == S::npos || ( n += stringlength( c ), n == s.size() ) ) ? S() : s.substr( n ); } //}}} //! Return all characters before the first occurrence of @a c, or @a s if @a c is not in @a s. //{{{ //! @param c The character or substring to match. It may be a: //! - single @c char or @c wchar_t type //! - @c NULL terminated @c char* or @c wchar_t* string //! - @c std::basic_string compatible type. //! @param s The string to match @a c in. Must be a @c std::basic_string //! compatible type. //! //! @return A string of type @a S, containing all characters before the //! first occurrence of @a c, or @a s if @a c is not in @a s. //}}} template< typename C, typename S > S beforefirst( C c, const S &s ) { //{{{ typename S::size_type n = s.find( c ); return ( n == S::npos ) ? s : ( n == 0 ) ? S() : S( s.substr( 0, n ) ); // cast here to keep the nested ternary operator happy } //}}} typedef std::basic_string OctetString; std::string OctetsToHex( const OctetString &octets, size_t wrap = 0, bool short_form = false ); static inline std::string OctetsToShortHex( const OctetString &octets, size_t wrap = 0 ) { return OctetsToHex( octets, wrap, true ); } template< typename T > std::string AsBinary( T n ) { //{{{ std::string s; for( int i = sizeof(T) * 8 - 1; i >= 0; --i ) s.push_back( n & 1ull< BB_PRINTF_FORMAT(1,0) void Logv( const char *format, va_list arglist ) { //{{{ if( opt_verbose < N ) return; // On OpenBSD 6.1, vfprintf is an unsafe cancellation point, where a // thread can be cancelled while the _thread_flockfile mutex is still // held, which results in the next attempt to call it hanging forever. // So block cancellation here to work around that bug until it's fixed. #if HAVE_BROKEN_STDIO_LOCKING && defined(_REENTRANT) int oldstate; pthread_testcancel(); pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, &oldstate ); #endif if( opt_timestamp ) { std::string msg = timeprintf("%T.%%u") + ": " + vstringprintf( format, arglist ); if( opt_syslog ) syslog( LOG_MAKEPRI(LOG_DAEMON, LOG_NOTICE), "%s", msg.c_str() ); else fprintf( stderr, "%s", msg.c_str() ); } else { if( opt_syslog ) vsyslog( LOG_MAKEPRI(LOG_DAEMON, LOG_NOTICE), format, arglist ); else vfprintf( stderr, format, arglist ); } #if HAVE_BROKEN_STDIO_LOCKING && defined(_REENTRANT) pthread_setcancelstate( oldstate, NULL ); #endif } //}}} template< int N > BB_PRINTF_FORMAT(1,2) void Log( const char *format, ... ) { //{{{ va_list arglist; va_start( arglist, format ); Logv( format, arglist ); va_end( arglist ); } //}}} template< int N > BB_PRINTF_FORMAT(1,2) void LogErr( const char *format, ... ) { //{{{ va_list arglist; va_start( arglist, format ); std::string msg = vstringprintf( format, arglist ); if( msg.size() && msg[msg.size() - 1] == '\n' ) msg.erase( msg.size() - 1 ); Log( "%s: %s\n", msg.c_str(), strerror(errno) ); va_end( arglist ); } //}}} template< int N > BB_PRINTF_FORMAT(2,3) void LogErr( int code, const char *format, ... 
) { //{{{ va_list arglist; va_start( arglist, format ); std::string msg = vstringprintf( format, arglist ); if( msg.size() && msg[msg.size() - 1] == '\n' ) msg.erase( msg.size() - 1 ); Log( "%s: %s\n", msg.c_str(), strerror(code) ); va_end( arglist ); } //}}} std::string DemangleSymbol( const char *sym ); #define EM_TYPEOF( T ) BitB::DemangleSymbol( typeid( T ).name() ).c_str() } #endif // _BB_LOG_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/math.h0000644000000000000000000000264114136173163015317 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2013 - 2021, Ron #ifndef _BB_MATH_H #define _BB_MATH_H #include namespace BitB { // Implicit modulo 1<<32 is part of the algorithm, so tell UBSan to relax. BB_NO_SANITIZE_UNSIGNED_INTEGER_OVERFLOW static inline uint32_t popcount( uint32_t v ) { // This can be notoriously crappy, but we don't care a lot here // this isn't the hottest operation we could usefully optimise. //return __builtin_popcount( v ); v = v - ((v >> 1) & 0x55555555); v = (v & 0x33333333) + ((v >> 2) & 0x33333333); return (((v + (v >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; } static inline unsigned fls( unsigned v ) { return v ? sizeof(unsigned) * 8 - unsigned(__builtin_clz(v)) : 0; } static inline unsigned fls( unsigned long v ) { return v ? sizeof(unsigned long) * 8 - unsigned(__builtin_clzl(v)) : 0; } static inline unsigned fls( unsigned long long v ) { return v ? sizeof(unsigned long long) * 8 - unsigned(__builtin_clzll(v)) : 0; } template< typename T > BB_CONST T powof2_down( T v ) { return T(1) << (fls(v) - 1); } template< typename T > BB_CONST T powof2_up( T v ) { return T(1) << fls(v - 1); } } #endif // _BB_MATH_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/qa.h0000644000000000000000000025634114136173163014777 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2014 - 2021, Ron // // With much kudos to John "Random" Walker for the public domain ENT suite // of tests, which the implementation below doesn't actually take any code // from, but which was shamelessly pillaged for ideas about which metrics // can be the most valuable indicators of randomness being Not Quite Right. // The original code for ENT can be found at #ifndef _BB_QA_H #define _BB_QA_H #include #include #include #include #include #include #include namespace BitB { // This one probably doesn't really belong here, but everything that needs it // includes this header, and putting it alone in its own would be overkill. // It might move somewhere more appropriate if we ever have such a thing. static inline size_t FoldBytes( uint8_t *buf, size_t len, unsigned folds ) { //{{{ if( len & ((1u << folds) - 1) ) throw Error( _("FoldBytes: length %zu cannot fold %u times"), len, folds ); for( ; folds; --folds ) { len >>= 1; for( size_t i = 0; i < len; ++i ) buf[i] ^= buf[len + i]; } return len; } //}}} namespace QA { // Implements the metrics from John "Random" Walker's ENT suite, as well // as an estimation of the min-entropy using the method that is described // in section 9.2 of the NIST SP 800-90B 2012 draft. These tests provide // a more sensitive measure of the statistical quality of the RNG, though // not as good as a more comprehensive suite like dieharder, but are also // still fast to run, so we can use them for continuous reporting on the // generated bitstream. 
We can also use them to provide both short and // long term measurements, since they are more accurate on larger sets of // data, but provide good enough results on smaller sets to make changes // in the statistical quality of the data become quickly evident. template< typename T > class Ent { //{{{ public: static const size_t NBITS = sizeof(T) * 8; static const size_t NBINS = 1u << NBITS; enum DataSet { //{{{ // MIN and MAX are slightly fuzzy concepts here, but BEST and WORST // wouldn't actually fit all the data types much better either ... // In general MIN is minimum value or error, and MAX is maximum // value or error, as is most appropriate for each data point. // Specifically: // // For entropy, MAX is the best value seen and MIN is the worst. // // For Chi^2 there is no single best value - an ideal result will // wander about in the expected range, but shouldn't wander above // or below it too far or for too long. Staying fixed exactly in // the middle of it all the time would be suspiciously too uniform. // // For Mean, Autocorrelation, and Pi, MIN is the best value seen // (the least deviation above or below the ideal value) and MAX is // the result with the largest error (whether positive or negative). CURRENT = 0, MIN = 1, MAX = 2, DATASET_MAX }; //}}} static const char *DataSetName( DataSet s ) { //{{{ switch( s ) { case CURRENT: return "Current"; case MIN: return "Min"; case MAX: return "Max"; case DATASET_MAX: break; } return "Unknown DataSet"; } //}}} struct Limits { //{{{ size_t long_minsamples; double long_entropy; double short_entropy; double long_chisq_min; double long_chisq_max; double short_chisq_min; double short_chisq_max; double long_mean_min; double long_mean_max; double short_mean_min; double short_mean_max; double long_pi; double short_pi; double long_corr; double short_corr; double long_minentropy; double short_minentropy; unsigned recovery_blocks; }; //}}} struct Result { //{{{ double entropy; double chisq; double mean; double pi; double corr; double minentropy; Result() : entropy( 0.0 ) , chisq( 0.0 ) , mean( 0.0 ) , pi( 0.0 ) , corr( 0.0 ) , minentropy( 0.0 ) {} Result( DataSet set ) { clear( set ); } Result( const Json::Data::Handle &result ) : entropy( result["Entropy"] ) , chisq( result["Chisq"] ) , mean( result["Mean"] ) , pi( result["Pi"] ) , corr( result["Autocorr"] ) , minentropy( result["MinEntropy"] ) {} void clear( DataSet set = CURRENT ) { //{{{ switch( set ) { case MIN: entropy = DBL_MAX; chisq = DBL_MAX; mean = DBL_MAX; pi = DBL_MAX; corr = DBL_MAX; minentropy = DBL_MAX; break; case MAX: entropy = -DBL_MAX; chisq = -DBL_MAX; mean = (1u << (NBITS - 1)) - 0.5; pi = M_PI; corr = 0.0; minentropy = -DBL_MAX; break; case CURRENT: case DATASET_MAX: entropy = 0.0; chisq = 0.0; mean = 0.0; pi = 0.0; corr = 0.0; minentropy = 0.0; break; } } //}}} bool operator!=( const Result &r ) const { //{{{ return r.entropy < entropy || r.entropy > entropy || r.chisq < chisq || r.chisq > chisq || r.mean < mean || r.mean > mean || r.pi < pi || r.pi > pi || r.corr < corr || r.corr > corr || r.minentropy < minentropy || r.minentropy > minentropy; } //}}} bool operator==( const Result &r ) const { return ! (*this != r); } double ChisqProb() const { return pochisq( chisq, NBINS - 1 ); } double PiError() const { //{{{ // If this is queried when cleared to MIN or MAX then it will // evaluate to +/- inf since pe will overflow. That's not a // very useful (or true) answer here, expecially since we pass // this into JSON streams where 'inf' is not a valid number. 
// So just return +/- DBL_MAX in those cases. double pe = 100.0 * (pi - M_PI) / M_PI; if( std::isfinite(pe) ) return pe; if( pe < 0.0 ) return -DBL_MAX; return DBL_MAX; } //}}} std::string Report() const { //{{{ return stringprintf( "Hs %f, Hm %f, Mean %f, Corr % .8f," " π %.8f (% .5f), χ² %f (%.2f)", entropy, minentropy, mean, corr, pi, PiError(), chisq, ChisqProb() ); } //}}} std::string AsJSON() const { //{{{ return stringprintf( "{" "\"Entropy\":%f" ",\"Chisq\":%f" ",\"Chisq-p\":%f" ",\"Mean\":%f" ",\"Pi\":%f" ",\"Pi-error\":%f" ",\"Autocorr\":%f" ",\"MinEntropy\":%f" "}", entropy, chisq, ChisqProb(), mean, pi, PiError(), corr, minentropy ); } //}}} }; //}}} struct Fail { //{{{ size_t tested; size_t entropy; size_t chisq; size_t mean; size_t pi; size_t corr; size_t minentropy; Fail() : tested( 0 ) , entropy( 0 ) , chisq( 0 ) , mean( 0 ) , pi( 0 ) , corr( 0 ) , minentropy( 0 ) {} Fail( const Json::Data::Handle &fail ) : tested( fail["Tested"]->As() ) , entropy( fail["Entropy"]->As() ) , chisq( fail["Chisq"]->As() ) , mean( fail["Mean"]->As() ) , pi( fail["Pi"]->As() ) , corr( fail["Autocorr"]->As() ) , minentropy( fail["MinEntropy"]->As() ) {} std::string Report() const { //{{{ return stringprintf( "Tested %zu, Hs %zu, Hm %zu, Mean %zu," " Corr %zu, π %zu, χ² %zu", tested, entropy, minentropy, mean, corr, pi, chisq ); } //}}} std::string AsJSON() const { //{{{ return stringprintf( "{" "\"Tested\":%zu" ",\"Entropy\":%zu" ",\"Chisq\":%zu" ",\"Mean\":%zu" ",\"Pi\":%zu" ",\"Autocorr\":%zu" ",\"MinEntropy\":%zu" "}", tested, entropy, chisq, mean, pi, corr, minentropy ); } //}}} }; //}}} static struct Results_Only_ {} Results_Only; struct Data { //{{{ private: // Helper struct to rank and sort bins for reporting struct Bin { //{{{ typedef std::vector< Bin > Vector; unsigned rank; unsigned symbol; size_t freq; Bin( unsigned s, size_t f ) : rank( 0 ) , symbol( s ) , freq( f ) {} static bool ByFrequency( const Bin &a, const Bin &b ) { return a.freq > b.freq; } static std::string PrettyPrint( const Vector &bins, size_t nsamples, size_t first_n = NBINS, size_t last_n = NBINS ) { //{{{ double dsamples = double(nsamples); double expected = dsamples / NBINS; double chisq = 0; size_t min = size_t(-1); size_t max = 0; std::string s = stringprintf( "Samples: %zu\n", nsamples ); if( first_n > 0 || last_n > 0 ) { s += sizeof(T) > 1 ? " Rank Bin Freq" : "Rank Bin Freq"; s += " Error χ² % of Samples\n"; } last_n = last_n < bins.size() ? bins.size() - last_n : 0; for( size_t i = 0, e = bins.size(); i < e; ++i ) { const Bin &b = bins[i]; double dfreq = double(b.freq); double error = dfreq - expected; double errorsq = error * error / expected; if( min > b.freq ) min = b.freq; if( max < b.freq ) max = b.freq; chisq += errorsq; if( i >= first_n && i < last_n ) continue; s += stringprintf( "%*u: %s %.*x -> %-12zu %+10.2f %8.2f %.9f\n", sizeof(T) > 1 ? 
5 : 3, b.rank, AsBinary(T(b.symbol)).c_str(), int(sizeof(T) * 2), b.symbol, b.freq, error, errorsq, dfreq / dsamples ); } double dmin = double(min); double dmax = double(max); s += stringprintf( " Expected %.3f, %+.3f (%+.3f%%), %+.3f (%+.3f%%)\n" " χ² %.2f (p = %f)", expected, dmax - expected, 100.0 * (dmax - expected) / expected, dmin - expected, 100.0 * (dmin - expected) / expected, chisq, pochisq(chisq, NBINS - 1) ); return s; } //}}} }; //}}} public: // Accumulators size_t bin[ NBINS ]; size_t samples; size_t inradius; size_t pisamples; unsigned corr0; unsigned corrn; double corr1; double corr2; double corr3; // Result cache Result result[ DATASET_MAX ]; Fail fail; // Enable extra code to report debug information when the long term // normalisation occurs. Note that you could be waiting for a very // long time for that to happen if size_t is a 64-bit type ... //#define CHECK_NORMALISATION #ifdef CHECK_NORMALISATION double skew; #endif Data() { clear(); result[MIN].clear( MIN ); result[MAX].clear( MAX ); } Data( const Json::Data::Handle &data ) : samples( data["Samples"]->As() ) , inradius( data["PiIn"]->As() ) , pisamples( data["PiSamples"]->As() ) { //{{{ Json::Data::Handle binarray = data["Bins"]; if( binarray->GetArraySize() != NBINS ) throw Error( _("Ent%zu::Data: invalid json with %zu bins"), NBITS, binarray->GetArraySize() ); for( size_t i = 0; i < NBINS; ++i ) bin[i] = binarray[i]->As(); for( size_t i = 0; i < DATASET_MAX; ++i ) result[i] = Result( data[ DataSetName(DataSet(i)) ] ); fail = Fail( data["Failed"] ); #ifdef CHECK_NORMALISATION skew = 0.0; #endif } //}}} Data( Results_Only_, const Json::Data::Handle &data ) { //{{{ clear(); samples = data["Samples"]->As(); for( size_t i = 0; i < DATASET_MAX; ++i ) result[i] = Result( data[ DataSetName(DataSet(i)) ] ); fail = Fail( data["Failed"] ); } //}}} void clear() { //{{{ // We don't clear the result cache here, just the accumulators // so as to be ready to begin calculating the next set. The // result cache is overwritten at the completion of that, but // kept around to be queried between runs. 
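                // Note that corr0 is reset to NBINS + 1 below, a value which
                // no valid symbol can take, so the autocorrelation accumulators
                // can tell that the first sample of the next block has not been
                // seen yet.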
memset( bin, 0, sizeof(bin) ); samples = 0; inradius = 0; pisamples = 0; corr0 = NBINS + 1; corrn = 0; corr1 = 0.0; corr2 = 0.0; corr3 = 0.0; #ifdef CHECK_NORMALISATION skew = 0.0; #endif } //}}} void normalise_long_term() { //{{{ if( samples > size_t(-1) / 2 ) { #ifdef CHECK_NORMALISATION Log<0>( "Before:\n%s\n", ReportBinsByFreq(5,5).c_str() ); Log<0>( "s: %zu, ir: %zu, ps: %zu, c1: %f, c2: %f, c3: %f\n", samples, inradius, pisamples, corr1, corr2, corr3 ); #endif double old_samples = double(samples); double old_expected = old_samples / NBINS; double new_expected = old_expected / 2.0; samples = 0; for( size_t i = 0; i < NBINS; ++i ) { double error = double(bin[i]) - old_expected; double chisq = (error * error) / old_expected; double fudge = sqrt(new_expected * chisq); if( error < 0 ) bin[i] = size_t(lrint(new_expected - fudge)); else bin[i] = size_t(lrint(new_expected + fudge)); samples += bin[i]; } double scale = double(samples) / old_samples; corr1 *= scale; corr2 *= scale; corr3 *= scale; #ifdef CHECK_NORMALISATION Log<0>( "After:\n%s\n", ReportBinsByFreq(5,5).c_str() ); Log<0>( "s: %zu, ir: %zu, ps: %zu, c1: %f, c2: %f, c3: %f\n", samples, inradius, pisamples, corr1, corr2, corr3 ); skew += samples - (old_samples / 2.0); Log<0>( "Count skew: %0.1f (%0.15f), total %0.1f\n", samples - (old_samples / 2.0), scale, skew ); Log<0>( "Before: %s\n", result[CURRENT].Report().c_str() ); ComputeResult(); Log<0>( "After : %s\n", result[CURRENT].Report().c_str() ); #endif } if( pisamples > size_t(-1) / 2 ) { #ifdef CHECK_NORMALISATION Log<0>( "s: %zu, ir: %zu, ps: %zu\n", samples, inradius, pisamples ); #endif inradius >>= 1; pisamples >>= 1; #ifdef CHECK_NORMALISATION Log<0>( "s: %zu, ir: %zu, ps: %zu\n", samples, inradius, pisamples ); Log<0>( "Before: %s\n", result[CURRENT].Report().c_str() ); ComputeResult(); Log<0>( "After : %s\n", result[CURRENT].Report().c_str() ); #endif } } //}}} void AddResult( double entropy, double chisq, double mean, double pi, double corr, double minentropy ) { //{{{ static const double Mean = (1u << (NBITS - 1)) - 0.5; result[CURRENT].entropy = entropy; result[CURRENT].chisq = chisq; result[CURRENT].mean = mean; result[CURRENT].pi = pi; result[CURRENT].corr = corr; result[CURRENT].minentropy = minentropy; if( result[MIN].entropy > entropy ) result[MIN].entropy = entropy; if( result[MIN].chisq > chisq ) result[MIN].chisq = chisq; if( fabs(result[MIN].mean - Mean) > fabs(mean - Mean) ) result[MIN].mean = mean; if( fabs(result[MIN].pi - M_PI) > fabs(pi - M_PI) ) result[MIN].pi = pi; if( fabs(result[MIN].corr) > fabs(corr) ) result[MIN].corr = corr; if( result[MIN].minentropy > minentropy ) result[MIN].minentropy = minentropy; if( result[MAX].entropy < entropy ) result[MAX].entropy = entropy; if( result[MAX].chisq < chisq ) result[MAX].chisq = chisq; if( fabs(result[MAX].mean - Mean) < fabs(mean - Mean) ) result[MAX].mean = mean; if( fabs(result[MAX].pi - M_PI) < fabs(pi - M_PI) ) result[MAX].pi = pi; if( fabs(result[MAX].corr) < fabs(corr) ) result[MAX].corr = corr; if( result[MAX].minentropy < minentropy ) result[MAX].minentropy = minentropy; } //}}} void ComputeResult() { //{{{ double dsamples = double(samples); double expected = dsamples / NBINS; double entropy = 0.0; double chisq = 0.0; double sum = 0.0; size_t cmax = 0; double pmax = 0.0; for( size_t i = 0; i < NBINS; ++i ) { double error = double(bin[i]) - expected; double p = double(bin[i]) / dsamples; if( bin[i] > cmax ) { cmax = bin[i]; pmax = p; } if( p > 0.0 ) entropy -= p * log2( p ); chisq += (error * 
error) / expected; sum += double(bin[i] * i); } // The autocorrelation coefficient for N samples at lag h is: // Rh = Ch / C0 // // Where Ch is the autocovariance function: // Ch = 1/N * Sum( t=1 -> N-h, (Yt - Ybar)(Ytplush - Ybar) ) // // And C0 is the variance function: // C0 = 1/N * Sum( t=1 -> N, (Yt - Ybar)^2 ) // // where Ybar is the sample mean and Ytplush is the sample that // is h samples after Yt (because ascii). Here we only compute // it for h=1. // // Rh ranges between -1.0 and 1.0, with 0 finding no correlation. double c1 = corr1 + corrn * corr0; double c2 = corr2 * corr2; double ac = (dsamples * c1 - c2) / (dsamples * corr3 - c2); AddResult( entropy, chisq, sum / dsamples, 4.0 * double(inradius) / double(pisamples), // report 1.0 if samples*c3-c2 == 0 std::isfinite(ac) ? ac : 1.0, // NIST SP 800-90B Section 9.2, min-entropy of IID sources -log2( (double(cmax) + 2.3 * sqrt(dsamples * pmax * (1.0 - pmax))) / dsamples ) ); } //}}} std::string ReportResult( DataSet set = CURRENT ) const { return stringprintf( "%zu: ", samples ) + result[set].Report(); } std::string ReportResults() const { //{{{ std::string s = stringprintf( "Samples: %zu", samples ); for( unsigned i = 0; i < DATASET_MAX; ++i ) s += stringprintf( "\n%7s: ", DataSetName(DataSet(i)) ) + result[i].Report(); s += "\nFailure: " + fail.Report(); return s; } //}}} // Present the bins ordered by symbol value std::string ReportBins( size_t first_n = NBINS, size_t last_n = NBINS ) const { //{{{ typename Bin::Vector bins; for( size_t i = 0; i < NBINS; ++i ) bins.push_back( Bin( unsigned(i), bin[i] ) ); typename Bin::Vector sorted_bins( bins ); std::stable_sort( sorted_bins.begin(), sorted_bins.end(), Bin::ByFrequency ); for( size_t i = 0; i < NBINS; ++i ) bins[ sorted_bins[i].symbol ].rank = unsigned(i + 1); return Bin::PrettyPrint( bins, samples, first_n, last_n ); } //}}} // Present the bins ordered by symbol frequency std::string ReportBinsByFreq( size_t first_n = NBINS, size_t last_n = NBINS ) const { //{{{ typename Bin::Vector bins; for( size_t i = 0; i < NBINS; ++i ) bins.push_back( Bin( unsigned(i), bin[i] ) ); std::stable_sort( bins.begin(), bins.end(), Bin::ByFrequency ); for( size_t i = 0; i < NBINS; ++i ) bins[i].rank = unsigned(i + 1); return Bin::PrettyPrint( bins, samples, first_n, last_n ); } //}}} std::string ResultAsJSON( DataSet set ) const { return '"' + std::string(DataSetName(set)) + "\":" + result[set].AsJSON(); } std::string ResultsAsJSON() const { //{{{ return stringprintf( "{" "\"Samples\":%zu", samples ) + ',' + ResultAsJSON( CURRENT ) + ',' + ResultAsJSON( MIN ) + ',' + ResultAsJSON( MAX ) + ",\"Failed\":" + fail.AsJSON() + '}'; } //}}} std::string AsJSON() const { //{{{ std::string s; s = stringprintf( "{" "\"Samples\":%zu" ",\"Bins\":[%zu", samples, bin[0] ); for( size_t i = 1; i < NBINS; ++i ) s += stringprintf( ",%zu", bin[i] ); s += stringprintf( "]" ",\"PiSamples\":%zu" ",\"PiIn\":%zu", pisamples, inradius ); for( unsigned d = 0; d < DATASET_MAX; ++d ) s += ',' + ResultAsJSON( DataSet(d) ); s += ",\"Failed\":" + fail.AsJSON(); return s + '}'; } //}}} }; //}}} private: // Bytes used as Monte Carlo co-ordinates. This should be no more // bits than the mantissa of the "double" floating point type. 
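        // With MONTE_BYTES = 6 each point uses two 24-bit coordinates, or
        // 48 bits in total, safely below the 53-bit significand of an
        // IEEE 754 double.  A uniformly random point in the square
        // [0,r] x [0,r] falls inside the quarter circle of radius r with
        // probability (pi * r^2 / 4) / r^2 = pi / 4 (in the continuous
        // limit), which is why ComputeResult() reports
        // 4.0 * inradius / pisamples as its estimate of pi.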
static const unsigned MONTE_BYTES = 6; size_t m_short_len; uint64_t m_radius; Data m_short; Data m_previous_short; Data m_long; bool m_have_results; bool m_have_unchecked_results; size_t m_entropy_converged; size_t m_mean_converged; size_t m_pi_converged; size_t m_corr_converged; size_t m_minentropy_converged; size_t m_ok_wait; // This one always operates on 8 bit samples, even in 16-bit mode void analyse_monte( const uint8_t *buf, size_t len ) { //{{{ // Are we inside or outside the radius of a circle with 24bit coordinates for( size_t i = 0; i + MONTE_BYTES < len; i += MONTE_BYTES ) { uint64_t x = 0, y = 0; for( size_t j = 0; j < MONTE_BYTES / 2; ++j ) { x = x * 256 + buf[i+j]; y = y * 256 + buf[MONTE_BYTES/2+i+j]; } if( x*x + y*y <= m_radius ) ++m_short.inradius; ++m_short.pisamples; } } //}}} void analyse( const T *buf, size_t len ) { //{{{ for( size_t i = 0; i < len; ++i ) { // Count bin frequencies for entropy and Chi^2 calculation m_short.bin[ buf[i] ]++; // Compute autocorrelation if( m_short.corr0 > NBINS ) m_short.corr0 = buf[i]; else m_short.corr1 += m_short.corrn * buf[i]; m_short.corrn = buf[i]; m_short.corr2 += m_short.corrn; m_short.corr3 += m_short.corrn * m_short.corrn; } m_short.samples += len; if( m_short.samples == m_short_len ) flush(); } //}}} const Limits &GetLimits() const; public: Ent( size_t short_len = 0 ) : m_short_len( short_len ? short_len : NBITS == 8 ? 500000 : 100000000 ) , m_radius( uint64_t(floor( pow( pow(256.0, MONTE_BYTES / 2) - 1.0, 2.0 ) )) ) , m_have_results( false ) , m_have_unchecked_results( false ) , m_entropy_converged( 0 ) , m_mean_converged( 0 ) , m_pi_converged( 0 ) , m_corr_converged( 0 ) , m_minentropy_converged( 0 ) , m_ok_wait( 1 ) { Log<2>( "+ Ent%zu( %zu )\n", NBITS, m_short_len ); } ~Ent() { Log<2>( "- Ent%zu( %zu )\n", NBITS, m_short_len ); } void clear() { m_short.clear(); m_long.clear(); } // You don't usually want to call this, except to flush a final block // of samples when the input may not be a multiple of the short block // length, and when you really only care about the results of the long // term analysis (since the short block result will not be typical if // it is analysed at a different length to the previous blocks). void flush() { //{{{ if( m_short.samples == 0 ) return; size_t long_minsamples = GetLimits().long_minsamples; size_t long_samples = m_long.samples; for( size_t i = 0; i < NBINS; ++i ) m_long.bin[i] += m_short.bin[i]; if( m_long.corr0 > NBINS ) m_long.corr0 = m_short.corr0; m_long.corrn = m_short.corrn; m_long.corr1 += m_short.corr1; m_long.corr2 += m_short.corr2; m_long.corr3 += m_short.corr3; m_long.inradius += m_short.inradius; m_long.pisamples += m_short.pisamples; m_long.samples += m_short.samples; m_short.ComputeResult(); m_long.ComputeResult(); m_long.normalise_long_term(); m_previous_short = m_short; m_short.clear(); m_have_results = true; m_have_unchecked_results = true; // Reset the long term min/max watermarks once we have enough samples // collected to expect they should remain within the tighter failure // limits if everything is working correctly. They aren't as useful // to us if they always get set during the first few sample periods. 
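            // Since long_samples was captured before this block was merged in
            // above, the test below is true exactly once, for the block which
            // first pushes the long term total past long_minsamples.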
if( long_samples <= long_minsamples && m_long.samples > long_minsamples ) m_long.result[MIN] = m_long.result[MAX] = m_long.result[CURRENT]; } //}}} void Analyse( const uint8_t *buf, size_t len ) { //{{{ size_t sample_len = len / sizeof(T); if( m_short.samples + sample_len > m_short_len ) { size_t r = (sample_len - (m_short_len - m_short.samples)) * sizeof(T); Analyse( buf, r ); Analyse( buf + r, len - r ); return; } // We probably can't always be guaranteed of alignment here, so we // might need to copy this on arches that care about that a lot. // But all the currently existing users of it that we have, do have // sufficient alignment to cast to (at least) uint16_t, so suppress // the compile time warning and throw at runtime if that's not true. // // If this ever isn't true, we can use IsAligned to check if we need // to copy it to an aligned bounce buffer first. try { const T *b = aligned_recast< const T* >( buf ); analyse_monte( buf, len ); analyse( b, sample_len ); } catch( const std::exception &e ) { throw Error( "Ent%zu::Analyse: %s", NBITS, e.what() ); } } //}}} bool IsOk( bool was_ok = true ) { //{{{ if( ! m_have_results ) return false; if( ! m_have_unchecked_results ) return m_ok_wait == 1; m_have_unchecked_results = false; m_short.fail.tested++; m_long.fail.tested++; const Limits &lim = GetLimits(); const Result &sr = m_short.result[CURRENT]; const Result &lr = m_long.result[CURRENT]; bool passed = true; if( sr.entropy < lim.short_entropy ) { m_short.fail.entropy++; passed = false; } if( lr.entropy < lim.long_entropy ) { if( m_long.samples > lim.long_minsamples ) { m_long.fail.entropy++; m_entropy_converged = 0; passed = false; } } else if( ! m_entropy_converged ) { m_entropy_converged = m_long.samples; } if( sr.minentropy < lim.short_minentropy ) { m_short.fail.minentropy++; passed = false; } if( lr.minentropy < lim.long_minentropy ) { if( m_long.samples > lim.long_minsamples ) { m_long.fail.minentropy++; m_minentropy_converged = 0; passed = false; } } else if( ! m_minentropy_converged ) { m_minentropy_converged = m_long.samples; } if( sr.chisq < lim.short_chisq_min || sr.chisq > lim.short_chisq_max ) { m_short.fail.chisq++; passed = false; } if( lr.chisq < lim.long_chisq_min || lr.chisq > lim.long_chisq_max ) { m_long.fail.chisq++; passed = false; } if( sr.mean < lim.short_mean_min || sr.mean > lim.short_mean_max ) { m_short.fail.mean++; passed = false; } if( lr.mean < lim.long_mean_min || lr.mean > lim.long_mean_max ) { if( m_long.samples > lim.long_minsamples ) { m_long.fail.mean++; m_mean_converged = 0; passed = false; } } else if( ! m_mean_converged ) { m_mean_converged = m_long.samples; } if( sr.pi < M_PI - lim.short_pi || sr.pi > M_PI + lim.short_pi ) { m_short.fail.pi++; passed = false; } if( lr.pi < M_PI - lim.long_pi || lr.pi > M_PI + lim.long_pi ) { if( m_long.samples > lim.long_minsamples ) { m_long.fail.pi++; m_pi_converged = 0; passed = false; } } else if( ! m_pi_converged ) { m_pi_converged = m_long.samples; } if( sr.corr < -lim.short_corr || sr.corr > lim.short_corr ) { m_short.fail.corr++; passed = false; } if( lr.corr < -lim.long_corr || lr.corr > lim.long_corr ) { if( m_long.samples > lim.long_minsamples ) { m_long.fail.corr++; m_corr_converged = 0; passed = false; } } else if( ! 
m_corr_converged ) { m_corr_converged = m_long.samples; } // If we're told the current state is "not ok", and we've seen // a measured failure within the last recovery_blocks short blocks, // then we report things are still not ok until we're sure that the // recovery is actually sustained. If we haven't actually seen any // failure, then report the current status immediately. // // This allows two things: // // - An assumption at startup that things are not ok until we've // actually seen that the first block probably is. In that case // we don't want to wait a long time, since the system might be // waiting on us to provide sufficent boot entropy. // // - A mechanism to bypass the recovery hysteresis for analysis // code that really wants to know the true state of each block // individually (or that wants to provide its own rules for // deciding when a recovery is believed to have happened). if( passed ) { if( ! was_ok && m_ok_wait != 1 && m_long.samples - m_ok_wait < lim.recovery_blocks * m_short_len ) { passed = false; } else { // m_long.samples increments in multiples of m_short_len, so // this value should never be seen if the recovery timer is // actually running. m_ok_wait = 1; } } else { m_ok_wait = m_long.samples; } m_previous_short.fail = m_short.fail; return passed; } //}}} bool HaveResults() const { return m_have_results; } const Result &ShortTermResult( DataSet set = CURRENT ) const { return m_short.result[set]; } const Result &LongTermResult( DataSet set = CURRENT ) const { return m_long.result[set]; } const Data &ShortTermData() const { return m_previous_short; } const Data &LongTermData() const { return m_long; } std::string ResultsAsJSON() const { //{{{ return stringprintf( "\"Ent%zu\":{", NBITS ) + "\"Short\":" + m_previous_short.ResultsAsJSON() + ",\"Long\":" + m_long.ResultsAsJSON() + '}'; } //}}} std::string AsJSON() const { //{{{ return stringprintf( "\"Ent%zu\":{", NBITS ) + "\"Short\":" + m_previous_short.AsJSON() + ",\"Long\":" + m_long.AsJSON() + '}'; } //}}} }; //}}} template< typename T> typename Ent::Results_Only_ Ent::Results_Only; // Don't warn about using "GNU old-style field designators" with clang, // or emit a warning if -Wgnu-designator is an unknown option elsewhere. // When C++ gives us something we can use which is nicer than this for // initialising POD structures, we will use it. Until then this is it. EM_TRY_PUSH_DIAGNOSTIC_IGNORE("-Wgnu-designator") template <> inline const Ent::Limits &Ent::GetLimits() const { //{{{ // We allow the long term measures 250MB to converge on the ranges // that we test for here. Most of those will converge to limits // which are much tighter than we allow for here, but that can take // quite a bit longer before they are sure to be stable within it, // even when all is working just as it should inside the expected // degree of variability. A generator running at 1Mbps will take // a bit over 30 minutes to reach this threshold, so this is a // reasonable compromise between tight bounds and the amount of // time needed before we can test them. Any failure that would // trip these bounds before that time will either also trip the // short term limits (or other tests like the FIPS set), or will // be not so far from statistically random that it couldn't be a // legitimate sequence from the long end of the tail. 
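    // (As a rough check of that estimate: 250000000 8-bit samples is 2e9
    //  bits, which at 1Mbps takes about 2000 seconds, or a little over
    //  33 minutes.)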
// // Some of them are likely to converge faster than this, at least // in the best or average case, but it doesn't seem to be worth // having the extra complication of individual lengths for them, // we can just use a tighter bound for those rather than trying // to optimise limits for them in multiple dimensions. static const Limits lim8 = { long_minsamples: 250000000, long_entropy: 7.999999, short_entropy: 7.999, // Random data is expected outside this range less than once in 1 million trials long_chisq_min: 161.643, long_chisq_max: 377.053, // Random data is expected outside this range less than once in 100 million trials short_chisq_min: 147.374, short_chisq_max: 400.965, long_mean_min: 127.5 - 0.019, long_mean_max: 127.5 + 0.019, short_mean_min: 127.5 - 0.58, short_mean_max: 127.5 + 0.58, long_pi: 0.0003 * M_PI, short_pi: 0.0097 * M_PI, long_corr: 0.00025, short_corr: 0.0078, long_minentropy: 7.99, short_minentropy: 7.73, recovery_blocks: 10, }; return lim8; } //}}} template <> inline const Ent<uint16_t>::Limits &Ent<uint16_t>::GetLimits() const { //{{{ // We allow the long term measures 800MB to converge on the ranges that // we test for here. Most of those will converge to limits which are // much tighter than we allow for here, but that can take quite a bit // longer before they are sure to be stable within it, even when all is // working just as it should inside the expected degree of variability. // A generator running at 1Mbps will take a bit under two hours to reach // this threshold, which is a bit less reasonable than the compromise we // make for 8 bit data, but we do also need a lot more data here to get // similarly significant results with 16-bit words. Ideally it should // probably be even larger than this, so that we can have bounds which // are about as tight as the 8-bit ones are (or have dynamic bounds that // tighten up as we do get more data), but this offers us a reasonable // sanity check for a first pass that will trigger in a not totally // unreasonable time if it's actually the first test to detect some // failure (which it is reasonably unlikely to be). The 16-bit analysis // is more for reassuring people who want to look at the data in detail // than for automatic health checks, so the delay is more 'annoying to // people' than a risk to the integrity of the system. // // In a similar vein we only require 3 good blocks here before lifting // the failure lockout, which isn't as big a compromise on assurance of // correct functioning as it might first seem. With the default short // block lengths of 500k samples for 8-bit testing and 100M samples for // the 16-bit tests, that means at least 600 8-bit tests need to have // continued passing during that time (as well as all the other tests) // if normal functioning is actually to resume. If we expect the same // number of good blocks here as we do for the 8-bit testing, then a // single transient failure can lock the device out for several hours, // and we can't totally rule out an occasionally exceptional result, // Because Random. There is a balancing game between making the limits // here tight enough to quickly catch abnormality but not so tight that // they are expected to be exceeded with 'annoying regularity' by a // properly functioning device.
static const Limits lim16 = { long_minsamples: 800000000, long_entropy: 15.9999, short_entropy: 15.9995, // Random data is expected outside this range less than once in 1 million trials long_chisq_min: 63823.624, long_chisq_max: 67265.364, // Random data is expected outside this range less than once in 100 million trials short_chisq_min: 321.0, short_chisq_max: 67459.181, long_mean_min: 32767.5 - 1.87, long_mean_max: 32767.5 + 1.87, short_mean_min: 32767.5 - 7.69, short_mean_max: 32767.5 + 7.69, long_pi: 0.000088 * M_PI, short_pi: 0.000395 * M_PI, long_corr: 0.00011, short_corr: 0.00044, long_minentropy: 15.893, short_minentropy: 15.708, recovery_blocks: 3, }; return lim16; } //}}} EM_POP_DIAGNOSTIC // Convenience types for 8 and 16 bit Ent instances typedef Ent<uint8_t> Ent8; typedef Ent<uint16_t> Ent16; // Tracks the length of runs of consecutive 0 or 1 bits, and the number of // runs of each length. This turns out to also be the "General Runs Test" // described in AIS-31, though we were doing this before I read that. It // can be run as a standalone test on an arbitrary length block of data, // but we also populate it from the FIPS test analysis routine (since we // are already counting runs of consecutive bits there so populating this // structure with that information is essentially 'free'). template< size_t MaxRun > class BitRun { //{{{ public: struct Result { //{{{ private: mutable double m_expected[ MaxRun ]; mutable double m_err[2][ MaxRun ]; mutable double m_chisq; mutable double m_chisqp; mutable unsigned m_chisqk; void compute_chisq() const { //{{{ if( m_chisqk ) return; double nbits = double(total[0] + total[1]); double div = 8.0; // 2^3 m_chisq = 0.0; for( size_t i = 0, e = std::min(maxrun, MaxRun); i < e; ++i ) { // The expected number of runs of length i in a // sequence of n bits is (n - i + 3) / 2^(i+2) m_expected[i] = (nbits - double(i) + 2.0) / div; m_err[0][i] = double(runlengths[0][i]) - m_expected[i]; m_err[1][i] = double(runlengths[1][i]) - m_expected[i]; if( m_expected[i] >= 5.0 ) { m_chisq += m_err[0][i] * m_err[0][i] / m_expected[i] + m_err[1][i] * m_err[1][i] / m_expected[i]; m_chisqk = unsigned(i); } div *= 2.0; } for( size_t i = maxrun; i < MaxRun; ++i ) { m_expected[i] = (nbits - double(i) + 2) / div; if( m_expected[i] >= 5.0 ) { m_err[0][i] = double(runlengths[0][i]) - m_expected[i]; m_err[1][i] = double(runlengths[1][i]) - m_expected[i]; m_chisq += m_err[0][i] * m_err[0][i] / m_expected[i] + m_err[1][i] * m_err[1][i] / m_expected[i]; m_chisqk = unsigned(i); } else break; div *= 2.0; } // The χ² degrees of freedom are k = 2 * i - 1, where i is the // longest runlength expected to occur at least 5 times. We // could approximate this from GetExpectedMax() - 4, but we'd // still need a test inside the loop for whether to add the // error term to the m_chisq total, so that doesn't gain us a // lot in this function. // // The probability computed here is still a bit handwavy in any // case. The distribution of runs is only approximated by χ², // and the cutoff of "at least 5" is an arbitrary convention // that has a long history of use but no real formal proof of // its significance compared to other reasonable but arbitrary // cutoff values. It gives us a reasonable guide for whether // some result is "about right" or "way off what is expected", // but the boundary between those two is less well defined in // this case.
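            //
            // (To give the expectation formula above a concrete scale: in a
            //  single FIPS-style block of n = 20000 bits, the expected number
            //  of runs of length 1 for each bit value is
            //  (20000 - 1 + 3) / 2^3 = 2500.25, which is also the centre of
            //  the 2315 .. 2685 interval used for length-1 runs in the FIPS
            //  checks further below, and each extra bit of run length roughly
            //  halves that expectation.)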
m_chisqk = m_chisqk * 2 + 1; // m_chisqk == i-1 before this m_chisqp = pochisq( m_chisq, m_chisqk ); } //}}} public: size_t runlengths[2][ MaxRun ]; size_t total[2]; size_t maxrun; Result() : m_chisq( 0.0 ) , m_chisqp( 0.0 ) , m_chisqk( 0 ) , maxrun( 0 ) { memset( m_expected, 0, sizeof(m_expected) ); memset( m_err, 0, sizeof(m_err) ); memset( runlengths, 0, sizeof(runlengths) ); memset( total, 0, sizeof(total) ); } Result( const Json::Data::Handle &result ) : m_chisq( result["Chisq"] ) , m_chisqp( result["Chisq-p"] ) , m_chisqk( result["Chisq-k"]->As() ) , maxrun( result["Max"]->As() ) { //{{{ Json::Data::Handle runs = result["Runs"]; size_t nruns = runs->GetArraySize(); if( nruns > MaxRun ) throw Error( _("BitRun<%zu>::Result: invalid json with %zu runs"), MaxRun, nruns ); memset( m_expected, 0, sizeof(m_expected) ); memset( m_err, 0, sizeof(m_err) ); memset( runlengths, 0, sizeof(runlengths) ); total[0] = result["Zeros"]->As(); total[1] = result["Ones"]->As(); for( size_t i = 0; i < nruns; ++i ) { runlengths[0][i] = runs[i][0]->As(); runlengths[1][i] = runs[i][1]->As(); m_expected[i] = runs[i][2]; m_err[0][i] = double(runlengths[0][i]) - m_expected[i]; m_err[1][i] = double(runlengths[1][i]) - m_expected[i]; } } //}}} void InvalidateChisq() { m_chisqk = 0; } double GetBias() const { return double(total[0]) / double(total[1]); } double GetChisq( double *p = NULL ) const { //{{{ compute_chisq(); if( p ) *p = m_chisqp; return m_chisq; } //}}} // Returns the runlength that is expected with probability ~0.5. // The actual maximum runlength which is seen has a fairly high // probability of being within about +/- 3 of this value. size_t GetExpectedMax() const { return size_t(lrint( log2( double(total[0] + total[1]) / 2.0 ) )); } // Tell UBSan that it's ok to have Inf and NaN results here as the // 'correctly reported' limits of some calculations, as defined by // IEC 559 / IEEE 754 for floating point math. BB_NO_SANITIZE_FLOAT_DIVIDE_BY_ZERO std::string Report() const { //{{{ compute_chisq(); std::string s( "run of zeros ones bias" " expect e0 % e1 %" " e0²/e e1²/e d0 d1\n" ); for( size_t i = 0, e = std::min(maxrun, MaxRun); i < e; ++i ) { size_t zeros = runlengths[0][i]; size_t ones = runlengths[1][i]; double e0 = m_err[0][i]; double e1 = m_err[1][i]; double expected = m_expected[i]; s += stringprintf( "%5zu: %12zu %12zu %12.6f %16.3f " "% 10.4f % 10.4f %9.2f%c %9.2f%c", i + 1, zeros, ones, double(zeros)/double(ones), expected, 100.0 * e0 / expected, 100.0 * e1 / expected, e0 * e0 / expected, double(zeros) < expected ? '-' : ' ', e1 * e1 / expected, double(ones) < expected ? 
'-' : ' ' ); if( i ) s += stringprintf( " %10.6f %10.6f\n", double(zeros) / double(runlengths[0][i-1]), double(ones) / double(runlengths[1][i-1]) ); else s += '\n'; } s += stringprintf( "\ntotal: %12zu %12zu %12.6f χ² = %f (p = %f), k = %u", total[0], total[1], double(total[0]) / double(total[1]), m_chisq, m_chisqp, m_chisqk ); if( maxrun >= MaxRun ) s += stringprintf( "\nMax run was %zu", maxrun ); return s; } //}}} std::string AsJSON() const { //{{{ compute_chisq(); std::string s( 1, '{' ); s += stringprintf( "\"Zeros\":%zu" ",\"Ones\":%zu" ",\"Max\":%zu", total[0], total[1], maxrun ); s += ",\"Runs\":["; for( size_t i = 0, n = std::min(maxrun, MaxRun); i < n; ++i ) { if( i ) s += ','; s += stringprintf( "[%zu,%zu,%f]", runlengths[0][i], runlengths[1][i], m_expected[i] ); } s += ']'; s += stringprintf( ",\"Chisq\":%f" ",\"Chisq-p\":%f" ",\"Chisq-k\":%u", m_chisq, m_chisqp, m_chisqk ); s += '}'; return s; } //}}} }; //}}} private: Result m_result; size_t m_runlength; unsigned m_runbit; public: BitRun() : m_runlength( 0 ) , m_runbit( 2 ) {} void clear() { m_result = Result(); m_runlength = 0; m_runbit = 2; } // You don't usually want to call this, except to flush the final run of // bits at the ultimate completion of analysis. If more bits are added // after it is called, the first bit will be considered as the start of // a new run, even if it is the same as the last bit previously added. void flush() { //{{{ if( __builtin_expect(m_runbit != 2, 1) ) { m_result.InvalidateChisq(); if( m_result.maxrun < m_runlength ) m_result.maxrun = m_runlength; size_t n = m_runlength < MaxRun ? m_runlength - 1 : MaxRun - 1; ++m_result.runlengths[m_runbit][n]; m_runlength = 0; m_runbit = 2; } } //}}} void AddBits( unsigned bit, size_t len ) { //{{{ m_result.InvalidateChisq(); m_result.total[bit] += len; if( __builtin_expect(m_runbit != 2, 1) ) { if( bit == m_runbit ) { m_runlength += len; return; } if( m_result.maxrun < m_runlength ) m_result.maxrun = m_runlength; size_t n = m_runlength < MaxRun ? m_runlength - 1 : MaxRun - 1; ++m_result.runlengths[m_runbit][n]; } m_runlength = len; m_runbit = bit; } //}}} void AddBits( const uint8_t *buf, size_t len ) { //{{{ unsigned run_length = 0; unsigned run_bit = 2; for( size_t i = 0; i < len; ++i ) { for( int b = 7; b >= 0; --b ) { unsigned bit = buf[i] & 1u << b ? 1 : 0; if( bit == run_bit ) { ++run_length; continue; } if( __builtin_expect(run_bit != 2, 1) ) AddBits( run_bit, run_length ); run_bit = bit; run_length = 1; } } if( __builtin_expect(run_bit != 2, 1) ) AddBits( run_bit, run_length ); } //}}} const Result &GetResult() const { return m_result; } }; //}}} // Convenience type for a default MaxRun length of 64. // The probability of runs up to this length in a uniformly random distribution // is about 1 in 37 Exabits, so this should be enough resolution for most uses. 
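    //
    // As a standalone usage sketch only (this helper is illustrative, it is
    // not used anywhere else in the package, and the buf/len arguments are
    // simply whatever block of sample data the caller happens to have):
    inline double ExampleBitRunPvalue( const uint8_t *buf, size_t len )
    {
        BitRun<64> runs;            // accumulate runs of 0s and 1s, up to length 64

        runs.AddBits( buf, len );   // scan every bit of the buffer
        runs.flush();               // account for the final, unterminated run

        double p = 0.0;
        runs.GetResult().GetChisq( &p );    // χ² of observed vs expected run counts

        return p;                   // the probability pochisq() assigns to that χ²
    }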
typedef BitRun<64> BitRuns; // Rolling statistics on runs of consecutive test passes template< size_t Q, size_t PERIOD > class PassRuns { //{{{ private: size_t m_count; size_t m_runs; size_t m_previous; size_t m_avg; size_t m_long_avg; size_t m_peak; public: static const size_t AVG_Q = Q; static const size_t AVG_PERIOD = PERIOD; PassRuns() : m_count( 0 ) , m_runs( 0 ) , m_previous( 0 ) , m_avg( 0 ) , m_long_avg( 0 ) , m_peak( 0 ) {} PassRuns( const Json::Data::Handle &pass ) : m_count( 0 ) , m_runs( pass["Runs"]->As() ) , m_previous( pass["Previous"]->As() ) , m_avg( pass["Short"]->As() << AVG_Q ) , m_long_avg( pass["Long"]->As() * m_runs ) , m_peak( pass["Peak"]->As() ) {} void clear() { //{{{ m_count = 0; m_runs = 0; m_previous = 0; m_avg = 0; m_long_avg = 0; m_peak = 0; } //}}} void Pass() { ++m_count; } // Returns true if the peak run length was exceeded bool Fail() { //{{{ m_runs++; m_avg = (m_avg * (AVG_PERIOD - 1) + (m_count << AVG_Q)) / AVG_PERIOD; m_long_avg += m_count; if( m_long_avg > size_t(-1) / 2 || m_runs > size_t(-1) / 2 ) { m_long_avg >>= 1; m_runs >>= 1; } if( m_count > m_peak ) { m_peak = m_count; m_previous = m_count; m_count = 0; return true; } m_previous = m_count; m_count = 0; return false; } //}}} size_t Runs() const { return m_runs; } size_t LongTerm() const { return m_runs ? m_long_avg / m_runs : 0; } size_t ShortTerm() const { return m_avg >> AVG_Q; } size_t Peak() const { return m_peak; } size_t Current() const { return m_count; } size_t Previous() const { return m_previous; } std::string Report() const { return stringprintf( "%zu, short %zu, long %zu, peak %zu", m_runs, ShortTerm(), LongTerm(), m_peak ); } std::string AsJSON() const { //{{{ return stringprintf( "{" "\"Runs\":%zu" ",\"Previous\":%zu" ",\"Short\":%zu" ",\"Long\":%zu" ",\"Peak\":%zu" "}", m_runs, m_previous, ShortTerm(), LongTerm(), m_peak ); } //}}} }; //}}} // Rolling statistics on test failure rates template< size_t Q, size_t PERIOD > struct FailRate { //{{{ private: void normalise_long_term() { //{{{ // We could keep a count of the number of times we have // needed to normalise the long term numbers to avoid // overflow, but it's probably not an interesting stat. if( pass + fail > size_t(-1) / 2 ) { pass >>= 1; fail >>= 1; } } //}}} public: static const size_t AVG_Q = Q; static const size_t AVG_PERIOD = PERIOD; size_t pass; size_t fail; size_t peak; size_t rate; FailRate() : pass( 0 ) , fail( 0 ) , peak( 0 ) , rate( 0 ) {} FailRate( const Json::Data::Handle &stats ) : pass( stats["Passed"]->As() ) , fail( stats["Failed"]->As() ) , peak( size_t(stats["Peak"] * (1u << AVG_Q)) ) , rate( size_t(stats["Short"] * (1u << AVG_Q)) ) {} void clear() { //{{{ pass = 0; fail = 0; peak = 0; rate = 0; } //}}} void Pass() { //{{{ ++pass; rate = rate * (AVG_PERIOD - 1) / AVG_PERIOD; normalise_long_term(); } //}}} // Returns true if the peak (short term) failure rate was exceeded bool Fail() { //{{{ ++fail; // We could probably just do this for each Pass, since any // test that fails every time has bigger problems than an // overflowing size_t on the number of times it did. normalise_long_term(); rate = (rate * (AVG_PERIOD - 1) + (1u< peak ) { peak = rate; return true; } return false; } //}}} double LongTerm() const { return pass + fail ? 
double(fail) / double(pass + fail) : 0.0; } double ShortTerm() const { return double(rate) / (1u << AVG_Q); } double Peak() const { return double(peak) / (1u << AVG_Q); } std::string Report() const { return stringprintf( "%zu / %zu, short %f, long %f, peak %f", fail, pass + fail, ShortTerm(), LongTerm(), Peak() ); } std::string AsJSON() const { //{{{ return stringprintf( "{" "\"Passed\":%zu" ",\"Failed\":%zu" ",\"Short\":%f" ",\"Long\":%f" ",\"Peak\":%f" "}", pass, fail, ShortTerm(), LongTerm(), Peak() ); } //}}} }; //}}} // Implements the tests from FIPS 140-2, including the Continuous RNG Test // from section 4.9.2, and the statistical tests (which were removed from // section 4.9.1 in Change Notice 2). These aren't particularly good tests // to evaluate the quality of a RNG, or for detecting subtle manipulation // of its output, but they are fast to run, and should quickly catch most // genuine failure modes of a known-good generator or any unsophisticated // attempt to degrade its operation in some way during normal running. // // It also implements the Adaptive Proportion Test from the NIST SP 800-90B // 2012 draft recommended continuous tests, described in section 6.5.1.2.2. // That test runs over a window of 65536 8-bit samples rather than the // blocks of 2500 samples the FIPS 140-2 tests use, so its results are // accumulated persistently over several analysis blocks. class FIPS { //{{{ public: // The FIPS 140-2 tests are designed for blocks of 20000 bits static const unsigned BUFFER_SIZE = 2500; // Q20 resolution, Short term period 1000 samples typedef QA::FailRate< 20, 1000 > FailRate; // Q14 resolution, Short term period 10 runs typedef QA::PassRuns< 14, 10 > PassRuns; enum Test { MONOBIT = 0, POKER = 1, RUNS = 2, LONG_RUN = 3, REPETITION = 4, PROPORTION = 5, RESULT, TEST_MAX }; static const char *TestName( Test n ) { //{{{ switch( n ) { case MONOBIT: return "Monobit"; case POKER: return "Poker"; case RUNS: return "Runs"; case LONG_RUN: return "Long run"; case REPETITION: return "Repetition"; case PROPORTION: return "Proportion"; case RESULT: return "Result"; case TEST_MAX: break; } return "Unknown test"; } //}}} private: // The 'Continuous test' for 16-bit words really isn't very useful. // We'd expect to get C(65536,1) * p(x)^2 * p(not_x) * 1250 twins // in each block (a pair in about 1.9% of blocks) from a uniformly // random source. Since FIPS-140-2 4.9.2 says this test fails if // any consecutive blocks are equal regardless of the chosen n, we // would fail far too many otherwise good blocks if using n = 16. // // For n = 32 that should become more like a 0.00001455% chance, // so 1 block in about 6.87 million should fail this test (as the // long term average for many billions of blocks). Assuming that // C(2^32,1) * p(1/(2^32))^2 * p(((2^32)-1)/(2^32)) * 625 // is a fair estimation of that probability. // // A better version of this test is described in the NIST SP 800-90B // 2012 draft, in section 6.5.1.2.1 Repetition Count Test. That test // defines an expected failure rate based on the assessed min-entropy // of the generator when operating correctly, but it's still a weak // test since a small degradation in real entropy can take quite a // long time to register as a test failure. The FIPS Continuous Test // we do here for n=32 is equivalent to that for min-entropy H > 30 // bits per sample.
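        //
        // (Spelling that n = 32 estimate out: a 2500 byte block holds 625
        //  32-bit words, each of which is compared against the word before
        //  it, and any single comparison matches with probability 2^-32, so
        //  the expected rate is roughly 625 / 2^32 ~= 1.455e-7 failures per
        //  block, i.e. 0.00001455%, or about one block in 6.87 million.)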
//uint16_t m_previous_word; uint32_t m_previous_word; unsigned m_pokerbins[16]; // This one has a dummy extra slot to avoid needing a test // for (not counting) the very first bit in the hot loop. unsigned m_runs[3][6]; // We need to keep persistent counts for the Adaptive Proportion test // since it operates with a different block size to what the rest of // the FIPS 140-2 tests use. uint8_t m_prop_val; unsigned m_prop_count; unsigned m_prop_n; FailRate m_failrate[ TEST_MAX ]; PassRuns m_passruns[ TEST_MAX ]; BitRuns m_bitruns; public: FIPS() : m_previous_word( 0x5EED1E57 ) // chosen by a fair dice roll , m_prop_val( 0 ) , m_prop_count( 0 ) , m_prop_n( 65535 ) { Log<2>( "+ FIPS\n" ); } FIPS( const Json::Data::Handle &fips ) { //{{{ Log<2>( "+ FIPS( json )\n" ); for( unsigned i = 0; i < TEST_MAX; ++i ) { Json::Data::Handle test = fips[TestName(Test(i))]; m_failrate[i] = FailRate( test["FailRate"] ); m_passruns[i] = PassRuns( test["PassRuns"] ); } } //}}} ~FIPS() { Log<2>( "- FIPS\n" ); } unsigned Check( const uint8_t *buf ) { //{{{ const unsigned pokermin = unsigned(( 2.16 + 5000.) * 5000. / 16.); // 1563175 const unsigned pokermax = unsigned((46.17 + 5000.) * 5000. / 16.); // 1576928.125 unsigned result = 0; unsigned ones_count = 0; unsigned run_length = 0; unsigned run_bit = 2; unsigned word = 0; unsigned word_byte = 0; memset( m_pokerbins, 0, sizeof(m_pokerbins) ); memset( m_runs, 0, sizeof(m_runs) ); for( unsigned i = 0; i < BUFFER_SIZE; ++i ) { word = word << 8 | buf[i]; if( ++word_byte == sizeof(m_previous_word) ) { if( m_previous_word == word ) result |= 1u << REPETITION; m_previous_word = word; ones_count += popcount( word ); word = 0; word_byte = 0; } ++m_pokerbins[ buf[i] >> 4 ]; ++m_pokerbins[ buf[i] & 0xf ]; for( int b = 7; b >= 0; --b ) { unsigned bit = buf[i] & 1u << b ? 1 : 0; if( bit == run_bit ) { ++run_length; continue; } if( run_length > 5 ) { if( run_length >= 25 ) result |= 1u << LONG_RUN; ++m_runs[run_bit][5]; } else ++m_runs[run_bit][run_length]; if( __builtin_expect(run_bit != 2, 1) ) m_bitruns.AddBits( run_bit, run_length + 1 ); run_bit = bit; run_length = 0; } // We assume min-entropy H = 8 bits for this test, and perform // it on a window of 65536 samples. The cutoff is chosen for // a 2^-30 chance of reporting a false positive. if( m_prop_val == buf[i] && ++m_prop_count > 358 ) { result |= 1u << PROPORTION; goto reset_prop_count; } if( ++m_prop_n >= 65536 ) { reset_prop_count: m_prop_val = buf[i]; m_prop_count = 0; m_prop_n = 0; } } if( run_length > 5 ) { if( run_length >= 25 ) result |= 1u << LONG_RUN; ++m_runs[run_bit][5]; } else ++m_runs[run_bit][run_length]; m_bitruns.AddBits( run_bit, run_length + 1 ); if( ones_count <= 9725 || ones_count >= 10275 ) result |= 1u << MONOBIT; size_t pokersum = 0; for( unsigned i = 0; i < 16; ++i ) pokersum += m_pokerbins[i] * m_pokerbins[i]; if( pokersum <= pokermin || pokersum > pokermax ) result |= 1u << POKER; for( unsigned i = 0; i < 2; ++i ) { if( m_runs[i][0] < 2315 || m_runs[i][0] > 2685 || m_runs[i][1] < 1114 || m_runs[i][1] > 1386 || m_runs[i][2] < 527 || m_runs[i][2] > 723 || m_runs[i][3] < 240 || m_runs[i][3] > 384 || m_runs[i][4] < 103 || m_runs[i][4] > 209 || m_runs[i][5] < 103 || m_runs[i][5] > 209 ) { result |= 1u << RUNS; break; } } return result ? 
1u << RESULT | result : 0; } //}}} bool Analyse( const uint8_t *buf ) { //{{{ unsigned result = Check( buf ); // The Pass counts for the Adaptive Proportion test will be a // bit skewed here, since with 64kB blocks we don't really pass // one for sure with every block processed. In the worst case, // we could fail one for every 2500 byte block though. It's not // really worth special casing for that though, since the rates // of (false positive) failure that are expected for that test // are so much lower than for the rest of them that being off by // even a factor of 26 in a moving average window of 1000 tests // isn't going to make any notable difference to our ability to // trigger on it. Graphing the absolute count is a much more // interesting statistic for that one. If it fires more often // than expected, the Chi-squared metric will probably trip to // warn us first in any case, if no other test does first. for( unsigned i = 0; i < TEST_MAX; ++i ) { if( result & 1u << i ) { m_failrate[i].Fail(); m_passruns[i].Fail(); } else { m_failrate[i].Pass(); m_passruns[i].Pass(); } } return result ? false : true; } //}}} bool IsOk( bool was_ok = true ) const { //{{{ // The runlength maximums are selected with the following rationale: // A maximum pass runlength of 17500 or longer is expected to occur // about once in every 1.17 million runs (about 3.4 TB of samples). // If the generator is running constantly at a 1 Mbps rate, we can // detect if a run is longer than that after about 350 seconds, and // would expect this to happen about once every 10 months. It will // take (possibly much) longer to detect an excess success anomaly // in any of the individual tests, but that would also tend to bias // the overall success rate toward excess unless one of the other // tests was also failing more often than expected, both of which // are likely to increase the chance of detecting it early through // some related measure. Tracking them individually does give us // a longer term gauge that could still be useful though. // // If the generator is run at a considerably higher or lower rate // then it might be worth tuning these to suit that better, but // this isn't a very probable failure mode in real world use. The // generator itself is unlikely to fail into such a mode, and any // influence that an external attacker may be able to have is not // likely to trigger this either (if they are capable of injecting // known data that passes the fail tests, it would be much easier // to ensure it passes this test too). Mostly this is a test of // of the software performing the tests. If we see an excessive // run without any failure, then maybe that's a bug in the test. // // There's not much point to tracking the pass runlengths of the // Adaptive Proportion test. It has a design false positive rate // of around one in 2^-30, so its possibly legitimate limit for // pass runlengths is near enough to infinite for our purposes. 
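        //
        // (The figures quoted against the maxpass table below can be sanity
        //  checked with a simple geometric approximation: if each block fails
        //  a given test with probability q, then a run of at least L
        //  consecutive passes occurs with probability (1 - q)^L ~= e^(-qL).
        //  For the overall result, with q around 1/1250 as noted further
        //  below, L = 17500 gives roughly e^-14, or about one run in
        //  1.2 million, the same order as quoted.)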
// // p(<=maxpass) = Sum(n<=maxpass, p(pass)^n * p(fail)) static const size_t maxpass[TEST_MAX] = { 134500, // one in 1189248 runs is expected to be longer than 134500, 141200, // one in 1178310 runs is expected to be longer than 141200 42500, // one in 1135535 runs is expected to be longer than 42500 46900, // one in 1177119 runs is expected to be longer than 46900 96000000, // one in 1164281 runs is expected to be longer than 96000000 size_t(-1), // one in 41 runs is expected to be longer than 4,000,000,000 17500 // one in 1170399 runs is expected to be longer than 17500 }; // We're actually somewhat less likely to hit these limits, purely // by chance, than what is indicated below, since we are measuring // them against a modified moving average with alpha of 0.001, so // a failure that occurred 1000 samples ago only counts as 37% of // one, while a failure that occurred 5000 samples ago still counts // for 0.6% of a measured tally. The probabilities below show the // chance of getting more than that number of failures in exactly // 1000 trials. Since both more failures are needed to get to that // count, and we're measuring them over a longer trial period, we // can't really define a single precise cutoff rate since it will // depend on not just their average rate, but how they are grouped // together too. It does have the desirable property of weighting // a suddenly high rate of failures more heavily, and discounting // older failures during a recovery period, and the rates are low // enough that any catastrophic failure will quickly exceed them. // For real world use that seems like a more valuable measure than // offering a precise statistical significance as the cutoff for // shutting down output in the event of detected failure. If any // use case does demand a more rigorous bound, then it would need // to be measured against a simple moving average instead. We'll // worry about that if somebody actually makes the case for it as // being the preferred behaviour in some or all instances. // // p(>maxfail) = 1 - Sum(n<=maxfail, C(1000,n) * p(fail)^n * p(pass)^(1000-n)) static const size_t maxfail[TEST_MAX] = { // one in 10858504 chance of > 4 monobit failures per 1000 blocks size_t(0.004 * (1u << FailRate::AVG_Q)), // one in 13834536 chance of > 4 poker failures per 1000 blocks size_t(0.004 * (1u << FailRate::AVG_Q)), // one in 16747771 chance of > 6 runs failures per 1000 blocks size_t(0.006 * (1u << FailRate::AVG_Q)), // one in 31936421 chance of > 6 long run failures per 1000 blocks size_t(0.006 * (1u << FailRate::AVG_Q)), // one in 94575485 chance of > 1 repetition failures per 1000 blocks size_t(0.001 * (1u << FailRate::AVG_Q)), // one in 2307763068086 chance of > 1 proportion failures per 1000 blocks size_t(0.001 * (1u << FailRate::AVG_Q)), // one in 507761 chance of > 7 failures of any kind per 1000 blocks size_t(0.007 * (1u << FailRate::AVG_Q)) }; if( was_ok ) { for( unsigned i = 0; i < TEST_MAX; ++i ) { if( m_failrate[i].rate > maxfail[i] ) return false; if( m_passruns[i].Current() > maxpass[i] ) return false; } } else { // Be conservative about declaring recovery from failure here. // Wait until we're operating well within the defined bounds again. // If everything is operating normally, and the bounds are set well, // we'd expect to fail this test reasonably often in normal running, // but not so often that it's unreasonable to expect to be within // these bounds for some period again before resuming normal service. 
// At the very least the last 20 blocks must have all passed. // This is mostly just to avoid the case where the source produces // bad results right from its very first output, but we haven't yet // analysed enough blocks from it to trip the average failure rate // thresholds. The pathological case of that being we only read one // one block before calling this function, and it fails, giving us // a running average of 1 failure per 1000, which is well within the // expected rates of failure for almost all tests. This should pass // with near certain probability if the source is functioning ok, we // expect average pass runs of about 1250 blocks between failures. if( m_passruns[RESULT].Current() < 20 ) return false; for( unsigned i = 0; i < TEST_MAX; ++i ) { if( m_failrate[i].rate > maxfail[i] / 2 ) return false; // We test against the previous runlength here, since the // current one may be under the limit because it is still // in progress and we don't know what its length will be. if( maxpass[i] != size_t(-1) && m_passruns[i].Previous() > maxpass[i] / 2 ) return false; } } return true; } //}}} const FailRate &GetFailRate( Test n = RESULT ) const { return m_failrate[n]; } const PassRuns &GetPassRuns( Test n = RESULT ) const { return m_passruns[n]; } std::string ReportFailRates() const { //{{{ std::string s; s += stringprintf( _("Fail rate: %zu / %zu %.3f %.3f %.3f"), m_failrate[RESULT].fail, m_failrate[RESULT].pass + m_failrate[RESULT].fail, m_failrate[RESULT].ShortTerm() * 1000.0, m_failrate[RESULT].LongTerm() * 1000.0, m_failrate[RESULT].Peak() * 1000.0 ); for( unsigned i = 0; i < RESULT; ++i ) if( m_failrate[i].fail ) s += stringprintf( ", %s: %zu %.3f %.3f %.3f", TestName( Test(i) ), m_failrate[i].fail, m_failrate[i].ShortTerm() * 1000.0, m_failrate[i].LongTerm() * 1000.0, m_failrate[i].Peak() * 1000.0 ); return s; } //}}} std::string ReportPassRuns() const { //{{{ std::string s; s += stringprintf( _("Pass runs: %zu %zu %zu %zu"), m_passruns[RESULT].Runs(), m_passruns[RESULT].ShortTerm(), m_passruns[RESULT].LongTerm(), m_passruns[RESULT].Peak() ); for( unsigned i = 0; i < RESULT; ++i ) if( m_passruns[i].Runs() ) s += stringprintf( ", %s: %zu %zu %zu %zu", TestName( Test(i) ), m_passruns[i].Runs(), m_passruns[i].ShortTerm(), m_passruns[i].LongTerm(), m_passruns[i].Peak() ); return s; } //}}} std::string ResultsAsJSON() const { //{{{ std::string s = "\"FIPS\":{"; for( unsigned i = 0; i < TEST_MAX; ++i ) { if( i ) s += ','; s += stringprintf( "\"%s\":{", TestName( Test(i) ) ); s += "\"PassRuns\":" + m_passruns[i].AsJSON(); s += ",\"FailRate\":" + m_failrate[i].AsJSON(); s += '}'; } s += '}'; // FIPS s += ",\"BitRuns\":" + m_bitruns.GetResult().AsJSON(); return s; } //}}} }; //}}} } // QA namespace } // BitB namespace #endif // _BB_QA_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/refptr.h0000644000000000000000000001771614136173163015701 0ustar // This file is distributed as part of the bit-babbler package. 
// Copyright 2003 - 2016, Ron #ifndef _BB_REFPTR_H #define _BB_REFPTR_H #include namespace BitB { template< typename T > class RefPtr { //{{{ private: T *m_refptr; public: typedef T Object; typedef RefPtr< const T > Const; RefPtr( T *ptr = NULL ) : m_refptr( ptr ) { if( m_refptr ) m_refptr->Ref(); } RefPtr( T &obj ) : m_refptr( &obj ) { if( m_refptr->GetRefCount() == 0 ) m_refptr->Ref(); m_refptr->Ref(); } RefPtr( const RefPtr &ptr ) : m_refptr( ptr.Raw() ) { if( m_refptr ) m_refptr->Ref(); } template< typename U > RefPtr( const RefPtr< U > &ptr ) : m_refptr( ptr.Raw() ) { if( m_refptr ) m_refptr->Ref(); } ~RefPtr() { if( m_refptr ) m_refptr->Unref(); } RefPtr &operator=( T *ptr ) { if( ptr ) ptr->Ref(); if( m_refptr ) m_refptr->Unref(); m_refptr = ptr; return *this; } RefPtr &operator=( const RefPtr &ptr ) { if( ptr.m_refptr ) ptr.m_refptr->Ref(); if( m_refptr ) m_refptr->Unref(); m_refptr = ptr.m_refptr; return *this; } template< typename U > RefPtr &operator=( const RefPtr< U > &ptr ) { U *p = ptr.Raw(); if( p ) p->Ref(); if( m_refptr ) m_refptr->Unref(); m_refptr = p; return *this; } T &operator*() const { if( __builtin_expect(m_refptr == NULL, 0) ) throw Error( "Attempt to dereference NULL %s", EM_TYPEOF( m_refptr ) ); return *m_refptr; } T *operator->() const { if( __builtin_expect(m_refptr == NULL, 0) ) throw Error( "Attempt to dereference NULL %s", EM_TYPEOF( m_refptr ) ); return m_refptr; } T* Raw() const { return m_refptr; } template< typename U > RefPtr< U > Downcast() const { U *u = dynamic_cast< U* >( m_refptr ); return RefPtr< U >( u ); } template< typename U > bool operator==( const RefPtr< U > &ptr ) const { return m_refptr == ptr.Raw(); } template< typename U > bool operator!=( const RefPtr< U > &ptr ) const { return m_refptr != ptr.Raw(); } bool operator==( T *ptr ) const { return m_refptr == ptr; } bool operator!=( T *ptr ) const { return m_refptr != ptr; } bool operator!() const { return m_refptr == NULL; } }; //}}} template< typename T > class RefCountedBy { //{{{ private: mutable T m_count; #ifdef _REENTRANT mutable pthread_mutex_t m_mutex; void mutex_lock() const { pthread_mutex_lock(&m_mutex); } void mutex_unlock() const { pthread_mutex_unlock(&m_mutex); } #else void mutex_lock() const {} void mutex_unlock() const {} #endif RefCountedBy( const RefCountedBy& ); RefCountedBy &operator=( const RefCountedBy& ); public: typedef RefPtr< RefCountedBy > Handle; RefCountedBy() : m_count( 0 ) { #ifdef _REENTRANT pthread_mutex_init(&m_mutex, NULL); #endif } virtual ~RefCountedBy() { #ifdef _REENTRANT pthread_mutex_destroy(&m_mutex); #endif } virtual void Ref() const { mutex_lock(); ++m_count; if( __builtin_expect(m_count == 0, 0) ) { mutex_unlock(); throw Error( "Ref with zero ref count. 
RefCountedBy<%s> overflow", EM_TYPEOF(T) ); } mutex_unlock(); } virtual void Unref() const { mutex_lock(); if( __builtin_expect(m_count == 0, 0) ) { mutex_unlock(); throw Error( "Unref with zero ref count in RefCountedBy<%s>", EM_TYPEOF(T) ); } if( --m_count == 0 ) { mutex_unlock(); delete this; return; } mutex_unlock(); } T GetRefCount() const { return m_count; } template< typename U > U &As() { return dynamic_cast< U& >( *this ); } }; //}}} typedef RefCountedBy RefCounted; #ifdef _REENTRANT class ScopedMutex { //{{{ private: pthread_mutex_t *m_mutex; ScopedMutex( const ScopedMutex& ); ScopedMutex &operator=( const ScopedMutex& ); public: explicit ScopedMutex( pthread_mutex_t *mutex = NULL ) : m_mutex( mutex ) { if( __builtin_expect(m_mutex != NULL,1) ) { int ret = pthread_mutex_lock( m_mutex ); if( __builtin_expect(ret,0) ) throw SystemError( ret, "ScopedMutex: failed to acquire mutex" ); } } ~ScopedMutex() { if( m_mutex ) { int ret = pthread_mutex_unlock( m_mutex ); if( __builtin_expect(ret,0) ) LogErr<0>( ret, "~ScopedMutex: failed to release mutex" ); } } void Lock( pthread_mutex_t *mutex ) { if( __builtin_expect(m_mutex != NULL,0) ) throw Error( "ScopedMutex::Lock: Another mutex is already held" ); int ret = pthread_mutex_lock( mutex ); if( __builtin_expect(ret,0) ) throw SystemError( ret, "ScopedMutex::Lock: failed to acquire mutex" ); m_mutex = mutex; } void Unlock() { if( __builtin_expect(m_mutex == NULL,0) ) throw Error( "ScopedMutex::Unlock: No mutex held" ); pthread_mutex_t *m = m_mutex; m_mutex = NULL; int ret = pthread_mutex_unlock( m ); if( __builtin_expect(ret,0) ) throw SystemError( ret, "ScopedMutex::Unlock: failed to release mutex" ); } }; //}}} class ScopedCancelState { //{{{ private: int m_oldstate; ScopedCancelState( const ScopedCancelState& ); ScopedCancelState &operator=( const ScopedCancelState& ); public: explicit ScopedCancelState( int state = PTHREAD_CANCEL_DISABLE ) { int ret = pthread_setcancelstate( state, &m_oldstate ); if( __builtin_expect(ret,0) ) throw SystemError( ret, "ScopedCancelState: failed" ); } ~ScopedCancelState() { int ret = pthread_setcancelstate( m_oldstate, NULL ); if( __builtin_expect(ret,0) ) LogErr<0>( ret, "~ScopedCancelState: failed" ); } void Restore() { int ret = pthread_setcancelstate( m_oldstate, NULL ); if( __builtin_expect(ret,0) ) throw SystemError( ret, "ScopedCancelState::Restore failed" ); } }; //}}} #endif // _REENTRANT } #endif // _BB_REFPTR_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/secret-sink.h0000644000000000000000000001406214136173163016615 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2014 - 2018, Ron #ifndef _BB_SECRET_SINK_H #define _BB_SECRET_SINK_H #include #include #include namespace BitB { class SecretSink : public RefCounted { //{{{ public: typedef RefPtr< SecretSink > Handle; typedef std::list< Handle > List; struct Options { //{{{ typedef std::list< Options > List; std::string devpath; size_t block_delay; size_t block_size; size_t bytes; Options() : block_delay( 0 ) , block_size( 65536 ) , bytes( 0 ) {} // We don't need these anymore with the new seedd config parser. // But hang on to them a bit longer in case something else does. 
#if 0 Options( const std::string &path, size_t bd = 0, size_t bs = 65536, size_t n = 0 ) : devpath( path ) , block_delay( bd ) , block_size( bs ) , bytes( n ) {} static Options ParseOptArg( const std::string &arg ) { //{{{ // Creates an Options struct from a string of the form: // path:delay:block_size:total_bytes // where everything except the path portion is optional. size_t n = arg.find( ':' ); if( n == std::string::npos ) return Options( arg ); std::string path = arg.substr( 0, n ); ++n; size_t n2 = arg.find( ':', n ); if( n2 == std::string::npos ) return Options( path, StrToScaledUL(arg.substr(n)) ); size_t delay = StrToScaledUL( arg.substr(n, n2 - n) ); n = n2 + 1; n2 = arg.find( ':', n ); if( n2 == std::string::npos ) return Options( path, delay, StrToScaledUL(arg.substr(n), 1024) ); size_t block = StrToScaledUL( arg.substr(n, n2 - n), 1024 ); n = n2 + 1; return Options( path, delay, block, StrToScaledUL(arg.substr(n), 1024) ); } //}}} #endif }; //}}} private: Options m_options; HealthMonitor m_qa; int m_fd; pthread_t m_thread; void do_read_thread() { //{{{ SetThreadName( stringprintf("QA %s", m_options.devpath.c_str()).c_str() ); Log<3>( "SecretSink( %s ): begin read_thread\n", m_options.devpath.c_str() ); uint8_t buf[ m_options.block_size ]; size_t bytes = 0; size_t n = 0; for(;;) { while( n < m_options.block_size ) { ssize_t r = read( m_fd, buf + n, m_options.block_size - n ); if( r < 0 ) throw SystemError( _("SecretSink( %s )::read( %zu ) failed"), m_options.devpath.c_str(), m_options.block_size - n ); if( r == 0 ) throw Error( _("SecretSink( %s )::read EOF"), m_options.devpath.c_str() ); n += size_t(r); } m_qa.Check( buf, n ); bytes += n; n = 0; if( m_options.bytes && bytes >= m_options.bytes ) { Log<3>( "SecretSink( %s ): read_thread completed, read %zu bytes\n", m_options.devpath.c_str(), bytes ); return; } if( m_options.block_delay ) usleep( useconds_t(m_options.block_delay * 1000) ); } } //}}} static void *read_thread( void *p ) { //{{{ SecretSink *s = static_cast( p ); try { s->do_read_thread(); } catch( const abi::__forced_unwind& ) { Log<3>( "SecretSink( %s ): read_thread cancelled\n", s->m_options.devpath.c_str() ); throw; } BB_CATCH_STD( 0, _("uncaught SecretSink::read_thread exception") ) return NULL; } //}}} public: SecretSink( const Options &options ) : m_options( options ) , m_qa( m_options.devpath ) { //{{{ Log<2>( "+ SecretSink( '%s' )\n", m_options.devpath.c_str() ); m_fd = open( m_options.devpath.c_str(), O_RDONLY ); if( m_fd < 0 ) throw SystemError( _("SecretSink: failed to open '%s'"), m_options.devpath.c_str() ); int ret = pthread_create( &m_thread, GetDefaultThreadAttr(), read_thread, this ); if( ret ) { close( m_fd ); throw SystemError( ret, _("SecretSink( %s ) failed to create thread"), m_options.devpath.c_str() ); } } //}}} ~SecretSink() { //{{{ Log<2>( "- SecretSink( '%s' )\n", m_options.devpath.c_str() ); Log<3>( "SecretSink( %s ): terminating read_thread\n", m_options.devpath.c_str() ); pthread_cancel( m_thread ); Log<3>( "SecretSink( %s ): waiting for read_thread termination\n", m_options.devpath.c_str() ); pthread_join( m_thread, NULL ); close( m_fd ); } //}}} }; //}}} } // BitB namespace #endif // _BB_SECRET_SINK_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/secret-source.h0000644000000000000000000025614514136173163017163 0ustar // This file is distributed as part of the bit-babbler package. 
// Copyright 2010 - 2021, Ron #ifndef _BB_SECRET_SOURCE_H #define _BB_SECRET_SOURCE_H #include #include #if EM_PLATFORM_LINUX #include #include #endif #if defined(__FreeBSD__) && __FreeBSD__ < 13 #include #endif #if EM_PLATFORM_MAC #include #endif #if HAVE_LIBUDEV // Prior to udev-150 their headers don't wrap this for C++, so do it here until // the buggy distro releases catch up. FC12 and CentOS 6, I'm looking at you. extern "C" { #include } #endif #define BB_VENDOR_ID FTDI_VENDOR_ID #define BB_PRODUCT_ID 0x7840 #define BB_WHITE_PRODUCTSTR "White RNG" #define BB_BLACK_PRODUCTSTR "Black RNG" // We sample on the rising edge and clock bits out MSB first by default //#define SAMPLE_FALLING_EDGE //#define LSB_FIRST // Sanity check that we only get exactly as much data as we requested. //{{{ // This guards against a class of kernel/USB controller/libusb bugs which could // corrupt correct transfer of the data we request from the hardware - such as // the issues that were seen with how early XHCI drivers handled short packets // differently to previous generations of USB controllers. We do already deal // with the known problem versions - since some of them may still be used by // people, but we can't test it with every possible system combination nor can // we predict every issue that may occur in future versions. But we can make a // good effort to detect any such problems and shut down the device until they // are properly understood and fixes or mitigations for them are in place too. // // You could somewhat safely disable this test if you've satisified yourself // that it really won't fire on the combination of hardware and software that // you are using - but it's really only worth even thinking about that if you // need to squeeze every last bit per second out of the hardware, which most // people simply won't ever really need to do. And the current test is quite // cheap to run. //}}} #define CHECK_EXCESS_BYTES namespace BitB { class BitBabbler : public FTDI { //{{{ private: static const unsigned FTDI_INIT_RETRIES = 20; unsigned m_enable_mask; unsigned m_disable_pol; unsigned m_bitrate; unsigned m_fold; unsigned m_sleep_init; // in milliseconds unsigned m_sleep_max; unsigned m_suspend_after; bool m_no_qa; void init_device() { //{{{ for( unsigned retries = FTDI_INIT_RETRIES; retries; --retries ) { if( retries < FTDI_INIT_RETRIES ) LogMsg<2>("BitBabbler::init_device: retrying"); if( ! InitMPSSE() ) continue; unsigned clk_div = 30000000 / m_bitrate - 1; const uint8_t cmd[] = { MPSSE_NO_CLK_DIV5, MPSSE_NO_ADAPTIVE_CLK, MPSSE_NO_3PHASE_CLK, MPSSE_SET_DATABITS_LOW, #ifdef SAMPLE_FALLING_EDGE uint8_t( 0x01 | m_disable_pol ), // all outputs low, clk high. 
#else uint8_t( 0x00 | m_disable_pol ), // CLK, DO, CS, outputs low #endif // masking polarity as per config uint8_t( 0x0B | m_enable_mask ), // Set CLK, DO, CS, as outputs, // generator mask as per config MPSSE_SET_DATABITS_HIGH, 0x00, // all outputs low 0x00, // set all pins as inputs MPSSE_SET_CLK_DIVISOR, uint8_t( clk_div & 0xFF ), // divisor low byte uint8_t( clk_div >> 8 ), // divisor high byte MPSSE_NO_LOOPBACK, }; try { WriteCommand( cmd, sizeof(cmd) ); } BB_CATCH_ALL( 0, _("BitBabbler::init_device: set clock failed"), continue; ) // Wait another 30ms for all of this to settle too usleep(30000); // Clear the (empty) return from the WriteCommand() purge_read(); return; } ThrowError( _("BitBabbler::init_device failed") ); } //}}} public: typedef RefPtr< BitBabbler > Handle; struct Options { //{{{ typedef std::list< Options > List; USBContext::Device::ID id; unsigned enable_mask; unsigned disable_polarity; unsigned bitrate; unsigned chunksize; unsigned latency; unsigned fold; unsigned group; unsigned sleep_init; // in milliseconds unsigned sleep_max; unsigned suspend_after; bool no_qa; Options() : enable_mask( 0x0f ) , disable_polarity( 0x00 ) , bitrate( 0 ) , chunksize( 0 ) , latency( unsigned(-1) ) , fold( unsigned(-1) ) , group( 0 ) , sleep_init( 100 ) , sleep_max( 60000 ) , suspend_after( 0 ) , no_qa( false ) {} void SetIdleSleep( const std::string &arg ) { //{{{ size_t n = arg.find(':'); if( n == std::string::npos ) throw Error( _("BitBabbler::Options: invalid idle-sleep argument '%s'"), arg.c_str() ); if( n != 0 ) { try { sleep_init = StrToScaledU( arg.substr(0, n) ); } catch( const std::exception &e ) { throw Error( _("BitBabbler::Options: invalid idle-sleep init '%s': %s"), arg.c_str(), e.what() ); } } if( n + 1 < arg.size() ) { try { sleep_max = StrToScaledU( arg.substr(n + 1) ); } catch( const std::exception &e ) { throw Error( _("BitBabbler::Options: invalid idle-sleep max '%s': %s"), arg.c_str(), e.what() ); } } if( sleep_max && sleep_init > sleep_max ) throw Error( _("BitBabbler::Options: invalid idle-sleep, init %u > max %u"), sleep_init, sleep_max ); } //}}} }; //}}} static unsigned RealBitrate( unsigned bitrate ) { //{{{ if( bitrate >= 30000000 ) return 30000000; if( bitrate <= 458 ) return 458; return 30000000 / (30000000 / bitrate); } //}}} private: unsigned choose_bitrate( const Options &opt ) { //{{{ if( opt.bitrate ) return RealBitrate( opt.bitrate ); return 2500000; } //}}} unsigned choose_folding( const Options &opt ) { //{{{ if( opt.fold != unsigned(-1) ) return opt.fold; if( GetProduct() == BB_WHITE_PRODUCTSTR ) return 1; if( GetProduct() == BB_BLACK_PRODUCTSTR ) return 3; ThrowError( _("BitBabbler: unknown product '%s', and folding not set"), GetProduct().c_str() ); #if EM_PLATFORM_MSW // Really we'll never get here, but mingw-w64 4.9.2-21+15.4 // appears to be too stupid to figure that out ... 
return 0; #endif } //}}} public: BitBabbler( const USBContext::Device::Handle &dev, const Options &options = Options(), bool claim_now = true ) : FTDI( dev, false ) , m_enable_mask( ~options.enable_mask << 4 & 0xf0 ) , m_disable_pol( options.disable_polarity << 4 & 0xf0 ) , m_bitrate( choose_bitrate(options) ) , m_fold( choose_folding(options) ) , m_sleep_init( options.sleep_init ) , m_sleep_max( options.sleep_max ) , m_suspend_after( options.suspend_after ) , m_no_qa( options.no_qa ) { //{{{ if( options.bitrate == m_bitrate ) LogMsg<2>( "+ BitBabbler( bitrate %u, fold %u, mask 0x%02x [%02x] )", m_bitrate, m_fold, options.enable_mask, m_enable_mask ); else LogMsg<2>( "+ BitBabbler( bitrate %u (%u), fold %u, mask 0x%02x [%02x] )", options.bitrate, m_bitrate, m_fold, options.enable_mask, m_enable_mask ); unsigned maxpacket = GetMaxPacketSize(); // Select the chunk size to be the largest power of 2 between the // maximum packet size and 64kB that will take less than 250ms to // transfer (which is then the maximum time we'll block waiting to // perform an orderly exit). size_t chunksize = std::max( maxpacket, std::min( options.chunksize ? options.chunksize : 65536u, powof2_down(m_bitrate / 32 / maxpacket * maxpacket) ) ); // Select the latency to avoid timing out and returning a short packet // before the full chunksize can be transferred. This isn't actually // the fastest way to get lots of data out of the device, but it does // significantly minimise the CPU load because we don't spin hard doing // many small transactions to get a whole chunk out. unsigned latency = std::max( 1u, std::min( 255u, maxpacket * 8000 / m_bitrate + 2 ) ); // The above computes what should be our theoretical optimum latency, // ie. the amount of time it would take to completely fill a packet // of the maximum allowable size. That in theory, should give us the // best throughput, since it requires the least number of transactions // to complete the transfer. In practice however, it appears that we // can wring a bit more speed out with a lower latency than that, even // when it means every packet ends up 'short' - though it comes at the // cost of significantly increasing CPU usage as the number of packets // required can increase drastically. This appears to be true even if // the 'performance' CPU governor is used, though it still may be a // function of just not letting the CPU throttle down as much by simply // keeping it busy more of the time. // // For a BitBabbler White in the default configuration, the latency // required to fill a 64kB request is 3ms. Decreasing that to 1ms is // worth about 2MB/hr in the rate of data which we can read from one. // Which is less than half a percent improvement, and the extra CPU // time cost is disproportionately larger than that - but it may still // be a useful speed up to want for some uses cases. (By comparison, // on the system I tested this on, using the performance governor was // worth about an extra 2% in output rate, or about 10MB/hr over what // was seen with the default 'powersave' option.) // // So we default to making efficient use of the CPU, but allow people // to override the latency if they do want speed over everything else. 
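        //
        // (As a worked example of the two calculations above, assuming the
        //  512 byte max packet size of a high speed bulk endpoint and the
        //  default 2.5Mbps bit rate: bitrate / 32 is the number of bytes
        //  that can move in 250ms, so the chunk size candidate is
        //  powof2_down(2500000 / 32 / 512 * 512) = powof2_down(77824) = 65536,
        //  and the latency works out as 512 * 8000 / 2500000 + 2 = 3ms,
        //  which is where the 64kB / 3ms figures quoted above come from.)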
if( options.latency != unsigned(-1) ) latency = options.latency; chunksize = SetChunkSize( chunksize ); SetLatency( latency ); LogMsg<3>( "Chunk size %zu, %zu ms/per chunk (latency %u ms, max packet %u)", chunksize, chunksize * 8000 / m_bitrate, latency, maxpacket ); if( claim_now ) Claim(); } //}}} ~BitBabbler() { LogMsg<2>( "- BitBabbler" ); Release(); } virtual bool Claim() { //{{{ if( ! FTDI::Claim() ) return false; init_device(); return true; } //}}} virtual void Release() { //{{{ ResetBitmode(); FTDI::Release(); } //}}} unsigned GetBitrate() const { return m_bitrate; } unsigned GetFolding() const { return m_fold; } unsigned GetIdleSleepInit() const { return m_sleep_init; } unsigned GetIdleSleepMax() const { return m_sleep_max; } unsigned GetSuspendAfter() const { return m_suspend_after; } bool NoQA() const { return m_no_qa; } size_t read( uint8_t *buf, size_t len ) { //{{{ if( __builtin_expect( len < 1 || len > 65536, 0 ) ) throw Error( _("BitBabbler::read( %zu ): invalid length"), len ); const uint8_t cmd[] = { #ifdef LSB_FIRST #ifdef SAMPLE_FALLING_EDGE MPSSE_DATA_BYTE_IN_NEG_LSB, #else MPSSE_DATA_BYTE_IN_POS_LSB, #endif #else // MSB first #ifdef SAMPLE_FALLING_EDGE MPSSE_DATA_BYTE_IN_NEG_MSB, #else MPSSE_DATA_BYTE_IN_POS_MSB, #endif #endif uint8_t((len - 1) & 0xFF), uint8_t((len - 1) >> 8), MPSSE_SEND_IMMEDIATE }; unsigned reset_attempts = 0; try { WriteCommand( cmd, sizeof(cmd) ); goto ok; } catch( const abi::__forced_unwind& ) { throw; } BB_CATCH_STD( 0, stringprintf("BitBabbler::read( %zu ) exception", len).c_str() ) while( ++reset_attempts < FTDI_INIT_RETRIES ) { // We shouldn't ever get here in normal operation, but if for some // reason things back up and the USB write request fails, try once // to reset the device and reissue the request before bailing out. // // We could also be here if the read repeatedly returns no data, // which could happen if the command written was somehow corrupted // in which case resetting the device is also the only way we can // be sure of what state it is in. We bound the maximum number of // times that may be attempted while no error is being reported by // any of these operations, but any real or permanent error should // normally result in bailing out before that limit is reached. LogMsg<1>( "BitBabbler::read( %zu ): attempting to reset device", len ); FTDI::Claim(); init_device(); WriteCommand( cmd, sizeof(cmd) ); ok: LogMsg<6>( "BitBabbler::read( %zu ): wrote request", len ); size_t count = 0; size_t n = 0; do { size_t ret = ftdi_read( buf + count, len - count ); if( __builtin_expect( ret > 0, 1 ) ) { LogMsg<6>( "BitBabbler::read( %zu ): read %zu (n = %zu)", len, ret, n ); count += ret; if( __builtin_expect( count == len, 1 ) ) { // This is just to create buffer bloat errors, // mostly for testing the purge recovery code. //WriteCommand( cmd, sizeof(cmd) ); #ifdef CHECK_EXCESS_BYTES if( __builtin_expect(GetReadAhead() != 0 || GetLineStatus() != (FTDI_THRE | FTDI_TEMT), 0) ) { size_t ra = GetReadAhead(); unsigned ls = GetLineStatus(); ret = ftdi_read( buf, len ); throw Error( _("BitBabbler::read( %zu ): Uh Oh excess data. 
" "Buffered %zu, line status 0x%02x [%s ]"), len, ra, ls, OctetsToHex( OctetString( buf, std::min(ret, size_t(8)) ) ).c_str() ); } #endif return len; } n = 0; } } while( ++n < FTDI_READ_RETRIES ); LogMsg<1>( _("BitBabbler::read( %zu ) failed (n = %zu)"), len, n ); } throw Error( _("BitBabbler::read( %zu ) failed after %u reset attempts"), len, reset_attempts ); } //}}} }; //}}} class Pool : public RefCounted { //{{{ public: struct Options { //{{{ size_t pool_size; std::string kernel_device; unsigned kernel_refill_time; // in seconds Options() : pool_size( 65536 ) , kernel_device( "/dev/random" ) , kernel_refill_time( 60 ) {} std::string Str() const { return stringprintf( "Size %zu, Kernel dev '%s', refill time %us", pool_size, kernel_device.c_str(), kernel_refill_time ); } }; //}}} class Group : public RefCounted { //{{{ public: typedef unsigned ID; typedef uint32_t Mask; typedef RefPtr< Group > Handle; typedef was_tr1::unordered_map< ID, Handle > Map; struct Options { //{{{ typedef std::list< Options > List; Group::ID groupid; size_t size; Options( const char *arg ) { char *e; unsigned long v = strtoul( arg, &e, 10 ); if( e == arg || *e != ':' || v > unsigned(-1) ) throw Error( _("Invalid --group-size option '%s'"), arg ); groupid = Group::ID(v); size = StrToScaledUL( e + 1, 1024 ); } }; //}}} private: Pool *m_pool; ID m_id; size_t m_size; uint8_t *m_buf; Mask m_filled; Mask m_mask; unsigned m_members; pthread_mutex_t m_mutex; public: Group( Pool *p, ID group_id, size_t size ) : m_pool( p ) , m_id( group_id ) , m_size( powof2_up(size) ) , m_buf( new uint8_t[m_size] ) , m_filled( 0 ) , m_mask( 0 ) , m_members( 0 ) { Log<2>( "+ Pool::Group( %u, %zu )\n", m_id, m_size ); pthread_mutex_init( &m_mutex, NULL ); } ~Group() { Log<2>( "- Pool::Group( %u, %zu )\n", m_id, m_size ); pthread_mutex_destroy( &m_mutex ); delete [] m_buf; } ID GetID() const { return m_id; } size_t GetSize() const { return m_size; } Mask GetNextMask() { //{{{ ScopedMutex lock( &m_mutex ); if( m_id == 0 ) return 0; for( Mask i = 1; i; i <<= 1 ) { if( (m_mask & i) == 0 ) { m_mask |= i; m_members++; return i; } } throw Error( _("Pool::Group %u is full"), m_id ); } //}}} void ReleaseMask( Mask i ) { //{{{ ScopedMutex lock( &m_mutex ); if( m_id == 0 ) return; if( (m_mask & i) == 0 ) { // This shouldn't ever happen, but we don't want to // corrupt the m_members count if it somehow does. // We don't want to throw, since we're probably in // a destructor (or similar unwinding) somewhere. Log<0>( _("Pool::Group(%u)::ReleaseMask( %x ): " "mask was not allocated (%x)\n"), m_id, i, m_mask ); return; } m_mask &= ~i; m_filled &= ~i; m_members--; } //}}} void AddEntropy( Mask m, uint8_t *b, size_t len ) { //{{{ if( len == 0 ) return; // This should never happen if( len != m_size ) throw Error( _("Pool::Group(%u:%x)::AddEntropy: len %zu != group size %zu"), m_id, m, len, m_size ); ScopedMutex lock( &m_mutex ); if( m_id == 0 || m_members == 1 ) { // short-circuit directly to the main pool if there is // only one source in this group (or if this is group 0). m_filled = 0; lock.Unlock(); m_pool->AddEntropy( b, len ); return; } if( ! 
m_filled ) { memcpy( m_buf, b, len ); m_filled = m; } else { for( size_t i = 0; i < len; ++i ) m_buf[i] ^= b[i]; m_filled |= m; } Log<5>("Group %u:%x: filled %x\n", m_id, m, m_filled); if( m_filled == m_mask ) { uint8_t buf[m_size]; memcpy( buf, m_buf, m_size ); m_filled = 0; lock.Unlock(); m_pool->AddEntropy( buf, m_size ); } } //}}} }; //}}} private: struct Source : public RefCounted { //{{{ typedef RefPtr< Source > Handle; typedef std::list< Handle > List; Pool *pool; uint8_t *buf; size_t size; Group::Handle group; Group::Mask groupmask; BitBabbler::Handle babbler; pthread_t thread; Source( Pool *p, const Group::Handle &g, const BitBabbler::Handle &b ) : pool( p ) , size( g->GetSize() * (1u << b->GetFolding()) ) , group( g ) , groupmask( g->GetNextMask() ) , babbler( b ) { Log<2>( "+ Pool::Source( %u:%u, %zu, %s )\n", group->GetID(), groupmask, size, babbler->GetSerial().c_str() ); if( size < babbler->GetChunkSize() ) throw Error( _("Pool::Source( %u:%u, %s ): size %zu < chunksize %zu"), group->GetID(), groupmask, babbler->GetSerial().c_str(), size, babbler->GetChunkSize() ); buf = new uint8_t[size]; // Bump the refcount until the thread is started, otherwise we // may lose a race with this Source being released by the caller // before the thread can take its handle from the raw pointer. // Think of it as a virtual Handle passed with pthread_create. Ref(); // We don't need to Unref() if this fails, because we'll throw // and it will never have been constructed to be destroyed ... int ret = pthread_create( &thread, GetDefaultThreadAttr(), Pool::source_thread, this ); if( ret ) { group->ReleaseMask( groupmask ); delete [] buf; throw SystemError( ret, _("Pool::Source: failed to create thread") ); } } ~Source() { Log<2>( "- Pool::Source( %u:%u, %zu, %s )\n", group->GetID(), groupmask, size, babbler->GetSerial().c_str() ); group->ReleaseMask( groupmask ); delete [] buf; } }; //}}} struct WriteFD : public RefCounted { //{{{ typedef RefPtr< WriteFD > Handle; typedef void (*Completion)(void *user_data); Pool *pool; int fd; size_t len; Completion completion_handler; void *user_data; WriteFD( Pool *p, int fd, size_t len, Completion handler = NULL, void *user_data = NULL ) : pool( p ) , fd( fd ) , len( len ) , completion_handler( handler ) , user_data( user_data ) {} }; //}}} typedef std::list< pthread_t > ThreadList; const Options m_opt; uint8_t *m_buf; size_t m_fill; size_t m_next; Group::Map m_groups; Source::List m_sources; ThreadList m_threads; pthread_mutex_t m_mutex; pthread_cond_t m_sourcecond; pthread_cond_t m_sinkcond; // You must hold m_mutex to call this bool PoolIsFull_() { return m_fill == m_opt.pool_size; } bool PoolIsFull() { ScopedMutex lock( &m_mutex ); return PoolIsFull_(); } void AddEntropy( uint8_t *buf, size_t len ) { //{{{ if( len == 0 ) return; ScopedMutex lock( &m_mutex ); size_t n = 0; if( m_fill < m_opt.pool_size ) { size_t b = std::min( m_opt.pool_size - m_fill, len ); Log<5>( "Pool::AddEntropy: add %zu / %zu octets at %zu / %zu\n", b, len, m_fill, m_opt.pool_size ); memcpy( m_buf + m_fill, buf, b ); n = b; m_fill += b; pthread_cond_broadcast( &m_sinkcond ); } while( n < len ) { size_t b = std::min( m_opt.pool_size - m_next, len - n ); Log<5>( "Pool::AddEntropy: mix %zu / %zu octets at %zu / %zu\n", b, len, m_next, m_opt.pool_size ); for( size_t i = 0; i < b; ++i ) m_buf[m_next + i] ^= buf[n + i]; n += b; m_next += b; if( m_next >= m_opt.pool_size ) m_next = 0; } } //}}} void detach_source( const Source::Handle &s ) { //{{{ ScopedMutex lock( &m_mutex ); for( 
Source::List::iterator i = m_sources.begin(), e = m_sources.end(); i != e; ++ i ) { if( *i == s ) { m_sources.erase( i ); pthread_detach( s->thread ); return; } } } //}}} BB_NORETURN void do_source_thread( const Source::Handle &s ) { //{{{ // All times here are in milliseconds. // - MIN_SLEEP is the minimum timeout before we will actually sleep. // - MAX_SLEEP is the longest duration we will actually sleep for, // with 0 meaning sleep indefinitely once MIN_SLEEP is exceeded. // - INITIAL_SLEEP is the duration we start doubling from once the // Pool is full, with 0 meaning sleep indefinitely immediately. static const unsigned MIN_SLEEP = 512; static const unsigned MAX_SLEEP = s->babbler->GetIdleSleepMax(); static const unsigned INITIAL_SLEEP = s->babbler->GetIdleSleepInit(); static const unsigned SUSPEND_AFTER = s->babbler->GetSuspendAfter(); SetThreadName( s->babbler->GetSerial().substr(0,15) ); s->babbler->LogMsg<3>( "Pool: begin source_thread (idle sleep %u:%u, suspend %u)", INITIAL_SLEEP, MAX_SLEEP, SUSPEND_AFTER ); // At rates of 5Mbps or greater, wait for the first Ent8 test results // before declaring the source is generating an acceptable quality of // entropy. Below that let it come online if the FIPS tests aren't // rejecting it, with at least 20 consecutive blocks having passed. HealthMonitor qa( s->babbler->GetSerial(), s->babbler->GetBitrate() < 5000000 ); size_t read_size = s->babbler->GetChunkSize(); unsigned fold = s->babbler->GetFolding(); bool no_qa = s->babbler->NoQA(); unsigned sleep_for = 0; for(;;) try { s->babbler->Claim(); for(;;) { if( __builtin_expect( sleep_for == unsigned(-1), 0 ) ) { // Sleep until we're explicitly woken by the pool being read from. ScopedMutex lock( &m_mutex ); if( __builtin_expect( PoolIsFull_(), 1 ) ) { s->babbler->LogMsg<6>( "Pool: source_thread waiting for wakeup" ); if( SUSPEND_AFTER ) s->babbler->Release(); int ret = pthread_cond_wait( &m_sourcecond, &m_mutex ); if( ret ) throw SystemError( ret, "pthread_cond_wait failed: %s", strerror(ret) ); if( SUSPEND_AFTER ) { lock.Unlock(); s->babbler->Claim(); } } } else if( __builtin_expect( sleep_for >= MIN_SLEEP, 0 ) ) { // Sleep until explicitly woken or the timeout expires. timespec wait_until; GetFutureTimespec( wait_until, sleep_for ); ScopedMutex lock( &m_mutex ); if( __builtin_expect( PoolIsFull_(), 1 ) ) { s->babbler->LogMsg<6>( "Pool: source_thread sleeping for %ums", sleep_for ); if( SUSPEND_AFTER && sleep_for >= SUSPEND_AFTER ) s->babbler->Release(); int ret = pthread_cond_timedwait( &m_sourcecond, &m_mutex, &wait_until ); if( ret && ret != ETIMEDOUT ) throw SystemError( ret, "pthread_cond_timedwait failed: %s", strerror(ret) ); if( SUSPEND_AFTER && sleep_for >= SUSPEND_AFTER ) { lock.Unlock(); s->babbler->Claim(); } } } for( size_t p = 0, n = 0; p <= s->size - read_size; p += n ) n = s->babbler->read( s->buf + p, read_size ); size_t n = FoldBytes( s->buf, s->size, fold ); if( __builtin_expect( PoolIsFull(), 0 ) ) { // If the pool is already (still) full, start to throttle back // on how often we keep mixing more new entropy into it. It's // not that it hurts to do that, but there's probably not much // point burning CPU cycles to do so as fast as possible while // nobody is actually consuming what we alredy have. if( sleep_for == 0 ) { sleep_for = INITIAL_SLEEP ? 
INITIAL_SLEEP : unsigned(-1); } else if( sleep_for < MIN_SLEEP || sleep_for < MAX_SLEEP ) { sleep_for *= 2; if( MAX_SLEEP && sleep_for > MAX_SLEEP ) sleep_for = MAX_SLEEP; } else if( MAX_SLEEP == 0 ) { sleep_for = unsigned(-1); } } else sleep_for = 0; // Don't idle if the QA check failed. We want to find out as quickly // as possible if that was just a transient spike outside the limits // (which being random is always possible, however rare it may be), // and bring the device back on line if that's what it appears to be. // If it stays bad, then the sysadmin is going to need to take some // action of their own in response to the alert, and this likewise // will ensure they have as much data as possible, as quickly as // possible to base that decision on. if( __builtin_expect( qa.Check( s->buf, n ) || no_qa, 1 ) ) s->group->AddEntropy( s->groupmask, s->buf, n ); else sleep_for = 0; } } catch( const USBError &e ) { // Don't warn about enum values not being explicitly handled here, // we don't want to have to chase every new error code added to // libusb that we don't explicitly care about handling here. EM_PUSH_DIAGNOSTIC_IGNORE("-Wswitch-enum") switch( e.GetErrorCode() ) { case LIBUSB_ERROR_PIPE: s->babbler->LogMsg<1>( "Pool source_thread caught (device %sclaimed): %s", s->babbler->IsClaimed() ? "": "un", e.what() ); s->babbler->Release(); break; case LIBUSB_ERROR_TIMEOUT: case LIBUSB_ERROR_OTHER: s->babbler->LogMsg<1>( "Pool source_thread caught: %s", e.what() ); s->babbler->SoftReset(); s->babbler->FTDI::Release(); break; default: throw; } EM_POP_DIAGNOSTIC } } //}}} static void *source_thread( void *p ) { //{{{ Source::Handle s = static_cast( p ); // Drop the 'virtual handle' from the ctor, we have a real one now. s->Unref(); try { s->pool->do_source_thread( s ); } catch( const abi::__forced_unwind& ) { s->babbler->LogMsg<3>( "Pool: source_thread cancelled" ); throw; } BB_CATCH_STD( 0, s->babbler->MsgStr( _("uncaught source_thread exception") ).c_str() ) s->pool->detach_source( s ); return NULL; } //}}} void detach_thread( pthread_t p ) { //{{{ ScopedMutex lock( &m_mutex ); for( ThreadList::iterator i = m_threads.begin(), e = m_threads.end(); i != e; ++i ) { if( *i == p ) { m_threads.erase( i ); pthread_detach( p ); return; } } } //}}} static void *writefd_thread( void *p ) { //{{{ WriteFD::Handle w = static_cast( p ); SetThreadName( "write fd" ); try { try { w->pool->WriteToFD( w->fd, w->len ); Log<3>( "Pool: writefd_thread completed\n" ); } catch( const abi::__forced_unwind& ) { Log<3>( "Pool: writefd_thread cancelled\n" ); throw; } BB_CATCH_STD( 0, _("uncaught writefd_thread exception") ) if( w->completion_handler ) w->completion_handler( w->user_data ); } catch( const abi::__forced_unwind& ) { Log<3>( "Pool: writefd_thread cancelled\n" ); throw; } BB_CATCH_STD( 0, _("uncaught writefd_thread completion exception") ) w->pool->detach_thread( pthread_self() ); return NULL; } //}}} static void *feedkernel_thread( void *p ) { //{{{ Pool *pool = static_cast( p ); SetThreadName( "kernel pool" ); try { Log<3>( "Pool: begin feedkernel_thread\n" ); pool->FeedKernelEntropy(); } catch( const abi::__forced_unwind& ) { Log<3>( "Pool: feedkernel_thread cancelled\n" ); throw; } BB_CATCH_STD( 0, _("uncaught feedkernel_thread exception") ) pool->detach_thread( pthread_self() ); return NULL; } //}}} public: typedef RefPtr< Pool > Handle; Pool( const Options &options = Options() ) : m_opt( options ) , m_fill( 0 ) , m_next( 0 ) { //{{{ Log<2>( "+ Pool( %s )\n", m_opt.Str().c_str() ); m_buf = new 
uint8_t[m_opt.pool_size]; pthread_mutex_init( &m_mutex, NULL ); pthread_cond_init( &m_sourcecond, NULL ); pthread_cond_init( &m_sinkcond, NULL ); } //}}} ~Pool() { //{{{ pthread_mutex_lock( &m_mutex ); Log<3>( "Pool: terminating threads\n" ); for( ThreadList::iterator i = m_threads.begin(), e = m_threads.end(); i != e; ++i ) pthread_cancel( *i ); Log<3>( "Pool: terminating sources\n" ); for( Source::List::iterator i = m_sources.begin(), e = m_sources.end(); i != e; ++ i ) pthread_cancel( (*i)->thread ); #if HAVE_ABI_FORCED_UNWIND Log<3>( "Pool: waiting for thread termination\n" ); while( ! m_threads.empty() ) { pthread_t p = m_threads.back(); m_threads.pop_back(); pthread_mutex_unlock( &m_mutex ); pthread_join( p, NULL ); pthread_mutex_lock( &m_mutex ); } Log<3>( "Pool: waiting for source termination\n" ); while( ! m_sources.empty() ) { pthread_t p = m_sources.back()->thread; m_sources.pop_back(); pthread_mutex_unlock( &m_mutex ); pthread_join( p, NULL ); pthread_mutex_lock( &m_mutex ); } pthread_mutex_unlock( &m_mutex ); #else // This isn't entirely "safe", new threads could be created or new // sources added while we are tearing down the existing set, but in // the case where we don't have full stack unwinding when a thread // is cancelled, there are other things which are much more likely // to be in an awkward state than this. We shouldn't be here in // "normal" operation anyway, only if we are in the process of a // controlled termination, so we can get away with crash diving our // way out of things to some extent. // // The main thing here is not to try to take the mutex again once // we unlock it, because a rudely cancelled thread might still be // holding it if the ScopedMutex wasn't properly unwound. ThreadList t; Source::List s; t.swap( m_threads ); s.swap( m_sources ); pthread_mutex_unlock( &m_mutex ); Log<3>( "Pool: waiting for thread termination\n" ); for( ThreadList::iterator i = t.begin(), e = t.end(); i != e; ++ i ) pthread_join( *i, NULL ); Log<3>( "Pool: waiting for source termination\n" ); for( Source::List::iterator i = s.begin(), e = s.end(); i != e; ++ i ) pthread_join( (*i)->thread, NULL ); #endif pthread_cond_destroy( &m_sinkcond ); pthread_cond_destroy( &m_sourcecond ); pthread_mutex_destroy( &m_mutex ); delete [] m_buf; Log<2>( "- Pool( %s )\n", m_opt.Str().c_str() ); } //}}} // Group size will be rounded up to a power of 2 void AddGroup( Group::ID group_id, size_t size ) { //{{{ Log<2>( "Pool::AddGroup( %u, %zu )\n", group_id, size ); ScopedMutex lock( &m_mutex ); Group::Map::iterator i = m_groups.find( group_id ); if( i != m_groups.end() ) throw Error( _("Pool::AddGroup( %u, %zu ): group already exists"), group_id, size ); m_groups[group_id] = new Group( this, group_id, size ); } //}}} void AddSource( Group::ID group_id, const BitBabbler::Handle &babbler ) { //{{{ babbler->LogMsg<2>( "Pool::AddSource: adding to group %u", group_id ); ScopedMutex lock( &m_mutex ); Group::Map::iterator gi = m_groups.find( group_id ); Group::Handle g; if( gi == m_groups.end() ) { g = new Group( this, group_id, m_opt.pool_size ); m_groups[group_id] = g; } else { g = gi->second; } m_sources.push_back( new Source( this, g, babbler ) ); } //}}} void RemoveSource( const USBContext::Device::Handle &d ) { //{{{ ScopedMutex lock( &m_mutex ); for( Source::List::iterator i = m_sources.begin(), e = m_sources.end(); i != e; ++ i ) { if( (*i)->babbler->IsDevice( d ) ) { pthread_t p = (*i)->thread; m_sources.erase( i ); lock.Unlock(); pthread_cancel( p ); #if defined(__FreeBSD__) && __FreeBSD__ 
< 13 //{{{ // On FreeBSD 11 (where hotplug support was first added) // if the device was removed while we were in an active // call to libusb_bulk_transfer(), then that call may // deadlock (typically somewhere in libusb_handle_events) // and we will never be able to join the cancelled thread. // // So to avoid it also deadlocking us, we limit the amount // of time we'll wait for the join to succeed, and if that // elapses, we'll bark about it, then just leak the thread // and its associated resources, and move on. // // It's not ideal, but it's about the best we can do until // the FreeBSD side of things is fixed. // // Since it looks like FreeBSD 13 doesn't have this problem // anymore, it can use the same code as everyone else again. Log<4>( "Pool::RemoveSource: cancelling thread for %s\n", d->VerboseStr().c_str() ); timespec wait_until; GetFutureTimespec( wait_until, 2000 ); int ret = pthread_timedjoin_np( p, NULL, &wait_until ); if( ret ) LogErr<0>( ret, "Pool::RemoveSource: failed to join thread for removed device %s", d->VerboseStr().c_str() ); else Log<4>( "Pool::RemoveSource: joined thread for %s\n", d->VerboseStr().c_str() ); //}}} #else pthread_join( p, NULL ); #endif return; } } } //}}} void RemoveAllSources() { //{{{ Source::List s; { ScopedMutex lock( &m_mutex ); for( Source::List::iterator i = m_sources.begin(), e = m_sources.end(); i != e; ++ i ) pthread_cancel( (*i)->thread ); s.swap( m_sources ); } // Block until they are actually released, so that the caller has some // guarantee that it's safe to use removed sources for something else, // without racing against the removal still being completed here. for( Source::List::iterator i = s.begin(), e = s.end(); i != e; ++ i ) pthread_join( (*i)->thread, NULL ); } //}}} // Will block until it can return min(len,poolsize) octets size_t read( uint8_t *buf, size_t len ) { //{{{ Log<5>( "Pool::read( %zu )\n", len ); ScopedMutex lock( &m_mutex ); while( m_fill < m_opt.pool_size && m_fill < len ) pthread_cond_wait( &m_sinkcond, &m_mutex ); size_t n = std::min( m_fill, len ); memcpy( buf, m_buf + (m_fill - n), n ); m_fill -= n; pthread_cond_broadcast( &m_sourcecond ); Log<5>( "Pool::read( %zu ) returning %zu (%zu remain)\n", len, n, m_fill ); return n; } //}}} void WriteToFD( int fd, size_t len = 0 ) { //{{{ uint8_t buf[65536]; for(;;) { size_t b = len ? std::min( len, sizeof(buf) ) : sizeof(buf); size_t n = read( buf, b ); for( size_t c = n; c; ) { ssize_t w = write( fd, buf + n - c, c ); if( w < 0 ) throw SystemError( _("Pool::WriteToFD( %d ) failed"), fd ); if( w == 0 ) throw Error( _("Pool::WriteToFD( %d ) EOF"), fd ); c -= size_t(w); } if( len && (len -= n) == 0 ) return; } } //}}} void WriteToFDAsync( int fd, size_t len = 0, WriteFD::Completion handler = NULL, void *user_data = NULL ) { //{{{ ScopedMutex lock( &m_mutex ); WriteFD *w = new WriteFD( this, fd, len, handler, user_data ); pthread_t p; int ret = pthread_create( &p, GetDefaultThreadAttr(), writefd_thread, w ); if( ret ) { delete w; throw SystemError( ret, _("Pool::WriteToFDAsync( %d, %zu ): " "failed to create thread"), fd, len ); } m_threads.push_back( p ); } //}}} BB_NORETURN void FeedKernelEntropy( const std::string &dev = std::string() ) { //{{{ #if !EM_PLATFORM_LINUX // This is normally defined in linux/random.h, but we'll reuse it // here as a generic struct to share as much of the code below as // we reasonably can between all platforms. 
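// For illustration only: the struct below mirrors the one declared in
// linux/random.h, which is what the Linux kernel expects to receive via
// the RNDADDENTROPY ioctl.  Stripped of the QA checks and folding that
// the real code further below performs, crediting entropy to the kernel
// pool reduces to a sketch like this (assuming Linux, a descriptor
// already opened O_RDWR on /dev/random with sufficient privilege, and a
// helper name chosen here purely for illustration):
//
//      #include <linux/random.h>
//      #include <sys/ioctl.h>
//      #include <stdint.h>
//      #include <string.h>
//
//      static bool credit_entropy( int fd, const uint8_t *data, size_t n )
//      {
//          // Reserve room for the header plus up to a 512 octet payload.
//          union {
//              struct rand_pool_info  rpi;
//              uint8_t                raw[sizeof(struct rand_pool_info) + 512];
//          } u;
//
//          if( n > 512 )
//              n = 512;
//
//          u.rpi.entropy_count = int(n * 8);   // entropy being claimed, in bits
//          u.rpi.buf_size      = int(n);       // payload length, in bytes
//          memcpy( u.rpi.buf, data, n );
//
//          // The kernel mixes the payload into its pool and credits at
//          // most entropy_count bits of entropy for it.
//          return ioctl( fd, RNDADDENTROPY, &u.rpi ) == 0;
//      }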
struct rand_pool_info { int entropy_count; int buf_size; uint32_t buf[]; }; #endif #if EM_PLATFORM_LINUX || EM_PLATFORM_MAC // Each time we wake up, either because the kernel pool has fallen // below its watermark or due to the timer expiring, we read a full // FIPS 140-2 analysis block (20k bits) from our own pool, and if // it passes the QA checks we then fold it twice to give us 5k bits // to pass to the kernel. We then pass it through the QA tests yet // again, partly to be completely paranoid, and partly so that we // can query the QA statistics at that stage too, and if it still // passes, we give it to the kernel. // // If all is working well, the QA testing feeding our pool should // ensure that it has very near to 8 bits of entropy per byte, so // for each iteration here we take ~20k bits of entropy, fold it // to 5k bits for the kernel to process, of which with its default // pool size it will credit us for at most 4096 bits of entropy. // That should mean the estimate of 8 bits of entropy per byte that // we tell it we are providing should be reasonable even if we are // a hair below that in the worst case. int fd = open( dev.empty() ? m_opt.kernel_device.c_str() : dev.c_str(), O_RDWR ); if( fd < 0 ) throw SystemError( _("Pool::FeedKernelEntropy: failed to open %s"), dev.empty() ? m_opt.kernel_device.c_str() : dev.c_str() ); const unsigned N = QA::FIPS::BUFFER_SIZE; // 20kbits for FIPS test. const unsigned folds = 2; const int timeout = int(m_opt.kernel_refill_time) ? int(m_opt.kernel_refill_time * 1000) : -1; union { uint8_t b[N + sizeof(struct rand_pool_info)]; struct rand_pool_info rpi; }; uint8_t *buf = reinterpret_cast( rpi.buf ); uint8_t b2[N]; size_t b2_fill = 0; HealthMonitor qa( "Pool" ); HealthMonitor qa2( "Kernel" ); bool source_ok; bool folded_ok = false; for(;;) { size_t n; do { n = read( buf, N ); if( ! (source_ok = qa.Check( buf, n )) ) { b2_fill = 0; continue; } n = FoldBytes( buf, n, folds ); memcpy( b2 + b2_fill, buf, n ); b2_fill += n; if( b2_fill >= N ) { b2_fill = 0; folded_ok = qa2.Check( b2, N ); } } while( ! source_ok || ! folded_ok ); rpi.entropy_count = int(n * 8); rpi.buf_size = int(n); #if EM_PLATFORM_LINUX if( ioctl( fd, RNDADDENTROPY, &rpi ) ) throw SystemError( _("Pool::FeedKernelEntropy: ioctl failed") ); EM_TRY_PUSH_DIAGNOSTIC_IGNORE("-Wgnu-designator") struct pollfd p = { fd: fd, events: POLLOUT, revents: 0 }; int r = poll( &p, 1, timeout ); if( r < 0 ) throw SystemError( _("Pool::FeedKernelEntropy: poll failed") ); EM_POP_DIAGNOSTIC #elif EM_PLATFORM_MAC // MacOS has no method of signalling to us when its kernel might // actually want more entropy, so we'll just feed it a new block // each time the kernel_refill_time expires. ssize_t r = write( fd, buf, n ); if( r < 0 ) throw SystemError( _("Pool::FeedKernelEntropy: write to kernel device failed") ); usleep( timeout * 1000 ); #else // We should only be here if we added extra platforms to the // outer condition and didn't actually add them here too ... #error "You forgot to implement this didn't you ..." 
#endif } #else (void)dev; throw Error("Kernel entropy not supported on this platform"); #endif } //}}} void FeedKernelEntropyAsync() { //{{{ ScopedMutex lock( &m_mutex ); pthread_t p; int ret = pthread_create( &p, GetDefaultThreadAttr(), feedkernel_thread, this ); if( ret ) throw SystemError( ret, _("Pool::FeedKernelEntropyAsync: " "failed to create thread") ); m_threads.push_back( p ); } //}}} }; //}}} class DevList : public USBContext { //{{{ private: unsigned m_vendorid; unsigned m_productid; pthread_mutex_t m_pool_mutex; Pool::Handle m_pool; BitBabbler::Options m_default_options; BitBabbler::Options::List m_device_options; protected: virtual void DeviceAdded( const Device::Handle &d ) { //{{{ ScopedMutex lock( &m_pool_mutex ); if( ! m_pool ) return; if( d->GetSerial().empty() ) { // We shouldn't be here in normal use, the user would need to // explicitly assign a pool to a monitor that enumerated bare // (or broken) devices ... so, bark and then ignore them. Log<0>( _("DevList::DeviceAdded: " "not adding device with no serial number to the pool: %s\n"), d->VerboseStr().c_str() ); return; } if( m_device_options.empty() ) { m_pool->AddSource( m_default_options.group, new BitBabbler( d, m_default_options, false ) ); return; } for( BitBabbler::Options::List::iterator i = m_device_options.begin(), e = m_device_options.end(); i != e; ++i ) { if( i->id.Matches( d ) ) { m_pool->AddSource( i->group, new BitBabbler( d, *i, false ) ); return; } } } //}}} virtual void DeviceRemoved( const Device::Handle &d ) { //{{{ ScopedMutex lock( &m_pool_mutex ); if( m_pool != NULL ) m_pool->RemoveSource( d ); } //}}} public: DevList( unsigned vendorid, unsigned productid ) : m_vendorid( vendorid ) , m_productid( productid ) { pthread_mutex_init( &m_pool_mutex, NULL ); } virtual ~DevList() { pthread_mutex_destroy( &m_pool_mutex ); } void AddDevicesToPool( const Pool::Handle &pool, const BitBabbler::Options &default_options, const BitBabbler::Options::List &device_options ) { //{{{ { ScopedMutex lock( &m_pool_mutex ); if( m_pool != NULL ) m_pool->RemoveAllSources(); m_pool = pool; m_default_options = default_options; m_device_options = device_options; } WarmplugAllDevices(); } //}}} unsigned GetVendorID() const { return m_vendorid; } unsigned GetProductID() const { return m_productid; } std::string GetVendorIDStr() const { return stringprintf("%04x", m_vendorid); } std::string GetProductIDStr() const { return stringprintf("%04x", m_productid); } }; //}}} #if HAVE_LIBUDEV class UDEVMonitor : public DevList { //{{{ private: static const unsigned m_qsize = 4096; struct udev *m_udev; struct udev_monitor *m_mon; pthread_t m_actor_thread; pthread_t m_monitor_thread; pthread_mutex_t m_action_mutex; pthread_cond_t m_action_cond; struct udev_device *m_deviceq[ m_qsize ]; unsigned m_deviceq_rd; unsigned m_deviceq_wr; void dump_udev_data( struct udev_device *d ) { //{{{ const char *action = udev_device_get_action( d ); const char *sysname = udev_device_get_sysname( d ); const char *driver = udev_device_get_driver( d ); const char *subsys = udev_device_get_subsystem( d ); const char *devtype = udev_device_get_devtype( d ); const char *devpath = udev_device_get_devpath( d ); const char *devnode = udev_device_get_devnode( d ); const char *syspath = udev_device_get_syspath( d ); const char *vendorid = udev_device_get_sysattr_value( d, "idVendor" ); const char *productid = udev_device_get_sysattr_value( d, "idProduct" ); const char *serial = udev_device_get_sysattr_value( d, "serial" ); // const char *vendorid = 
udev_device_get_property_value( d, "ID_VENDOR_ID" ); // const char *productid = udev_device_get_property_value( d, "ID_MODEL_ID" ); // const char *serial = udev_device_get_property_value( d, "ID_SERIAL_SHORT" ); Log<0>( "action: %s, sysname: %s, driver: %s\n", action, sysname, driver ); Log<0>( "subsys: %s, devtype: %s, devpath: %s\n", subsys, devtype, devpath ); Log<0>( "devnode: %s, syspath: %s\n", devnode, syspath ); Log<0>( "vendorid: %s, productid: %s, serial: %s\n", vendorid, productid, serial ); // This function was added in udev release 167. #if HAVE_UDEV_DEVICE_GET_SYSATTR_LIST_ENTRY struct udev_list_entry *attrlist, *a; attrlist = udev_device_get_sysattr_list_entry( d ); udev_list_entry_foreach( a, attrlist) { const char *key = udev_list_entry_get_name( a ); if( ! key ) continue; const char *val = udev_device_get_sysattr_value( d, key ); Log<0>( " attribute %s = '%s'\n", key, val ); } #endif struct udev_list_entry *proplist, *p; proplist = udev_device_get_properties_list_entry( d ); udev_list_entry_foreach( p, proplist ) { const char *key = udev_list_entry_get_name( p ); if( ! key ) continue; const char *val = udev_list_entry_get_value( p ); Log<0>( " property %s = '%s'\n", key, val ); } // This function was added in udev release 154. #if HAVE_UDEV_DEVICE_GET_TAGS_LIST_ENTRY struct udev_list_entry *taglist, *t; taglist = udev_device_get_tags_list_entry( d ); udev_list_entry_foreach( t, taglist ) { const char *key = udev_list_entry_get_name( t ); if( ! key ) continue; const char *val = udev_list_entry_get_value( t ); Log<0>( " tag %s = '%s'\n", key, val ); } #endif } //}}} private: Device::Handle new_device( unsigned vendorid, unsigned productid, unsigned busnum, unsigned devnum, const std::string &mfg, const std::string &product, const std::string &serial, const std::string &devport, const std::string &devpath ) { //{{{ Device::Handle d = find_device( busnum, devnum ); if( ! d ) { // This should only happen if we lose some race. // Either the device was unplugged again before we got here, // or libusb somehow doesn't know that it exists yet ... Log<0>( _("UDEVMonitor: failed to find device %03u:%03u\n"), busnum, devnum ); return NULL; } if( d->GetVendorID() != vendorid || d->GetProductID() != productid ) { // This really shouldn't ever happen ... Log<0>( _("UDEVMonitor: device matched devnum %03u:%03u " "but mismatched vendor:product, %04x:%04x != %04x:%04x\n"), busnum, devnum, vendorid, productid, d->GetVendorID(), d->GetProductID() ); return NULL; } // This shouldn't normally ever happen, but it can if the EEPROM was // rewritten or corrupted in some way. Warn because possibly the // device just needs to be re-enumerated to correct the kernel's idea // of what is really there. It can also happen if the user calling // this does not have permission to actually read from the device. 
if( mfg != d->GetManufacturer() || product != d->GetProduct() || serial != d->GetSerial() ) { Log<1>( _("UDEVMonitor: expecting mfg '%s', product '%s', serial '%s', " "but device %03u:%03u returned '%s', '%s', '%s'\n"), mfg.c_str(), product.c_str(), serial.c_str(), busnum, devnum, d->GetManufacturer().c_str(), d->GetProduct().c_str(), d->GetSerial().c_str() ); // On the assumption that most of the time this will actually be // just a permission problem, if the attempt to read the strings // from the device returned an empty value, set them to the values // we read from udev because they are probably correct, and this // is a lot let confusing to an unprivileged user who just wants // to see what devices are available. // // If they try to do something more than that it will fail anyway // because they really won't have permission for it. if( d->GetManufacturer().empty() ) d->SetManufacturer( mfg ); if( d->GetProduct().empty() ) d->SetProduct( product ); if( d->GetSerial().empty() ) d->SetSerial( serial ); } if( d->GetDevicePort().empty() ) d->SetDevicePort( devport ); else if( devport != d->GetDevicePort() ) Log<0>( _("UDEVMonitor: udev says device has port '%s', but libusb reported '%s'\n"), devport.c_str(), d->GetDevicePort().c_str() ); d->SetDevpath( devpath ); return d; } //}}} Device::Handle new_device( unsigned vendorid, unsigned productid, const char *busnum, const char *devnum, const char *mfg, const char *product, const char *serial, const char *devport, const char *devpath ) { //{{{ return new_device( vendorid, productid, StrToU( busnum ? busnum : "", 10 ), StrToU( devnum ? devnum : "", 10 ), mfg ? mfg : "", product ? product : "", serial ? serial : "", devport ? devport : "", devpath ? devpath : "" ); } //}}} // Run the queue of device notifications. BB_NORETURN void __actor_thread() { //{{{ SetThreadName( "hotplug event" ); std::string vid = GetVendorIDStr(); std::string pid = GetProductIDStr(); for(;;) { while( m_deviceq_wr - m_deviceq_rd > 0 ) { // There is a window here, where if we get cancelled, a few // new device notifications may have already been queued. // We could install a fancier cleanup handler, to catch any // that we might otherwise leak - but since right now, this // thread should only be exiting if the whole app is being // torn down, we'll just let the Big Reaper get them for us. pthread_testcancel(); ScopedCancelState cancelstate; struct udev_device *d = m_deviceq[m_deviceq_rd++ % m_qsize]; if( opt_verbose > 3 ) dump_udev_data( d ); const char *action = udev_device_get_action( d ); const char *devpath = udev_device_get_devpath( d ); if( ! action || ! 
devpath ) { // Ensure strcmp won't explode if udev is braindead Log<0>( _("UDEVMonitor: event with no action or devpath\n") ); } else if( strcmp(action, "remove") == 0 ) { RemoveDeviceByDevpath( devpath ); } else if( strcmp(action, "add") == 0 ) { // Alternatively, but less reliably: //const char *vendorid = udev_device_get_property_value( d, "ID_VENDOR_ID" ); //const char *productid = udev_device_get_property_value( d, "ID_MODEL_ID" ); const char *vendorid = udev_device_get_sysattr_value( d, "idVendor" ); const char *productid = udev_device_get_sysattr_value( d, "idProduct" ); if( vendorid && productid && vendorid == vid && productid == pid ) { //const char *serial = udev_device_get_property_value( d, "ID_SERIAL_SHORT" ); const char *serial = udev_device_get_sysattr_value( d, "serial" ); try { //const char *mfg = udev_device_get_property_value( d, "ID_VENDOR" ); //const char *product = udev_device_get_property_value( d, "ID_MODEL" ); const char *mfg = udev_device_get_sysattr_value( d, "manufacturer" ); const char *product = udev_device_get_sysattr_value( d, "product" ); const char *busnum = udev_device_get_sysattr_value( d, "busnum" ); const char *devnum = udev_device_get_sysattr_value( d, "devnum" ); const char *devport = udev_device_get_sysattr_value( d, "devpath" ); Device::Handle h = new_device( GetVendorID(), GetProductID(), busnum, devnum, mfg, product, serial, devport, devpath ); if( h != NULL ) AddDevice( h ); } BB_CATCH_ALL( 0, _("UDEVMonitor: add event exception") ) } } udev_device_unref( d ); } ScopedMutex lock( &m_action_mutex ); pthread_cond_wait( &m_action_cond, &m_action_mutex ); } } //}}} // Wait for udev to chirp at us. BB_NORETURN void __monitor_thread() { //{{{ SetThreadName( "udev monitor" ); for(;;) { struct udev_device *d = udev_monitor_receive_device( m_mon ); if( ! d ) continue; ScopedCancelState cancelstate; if( m_deviceq_wr - m_deviceq_rd < m_qsize ) { m_deviceq[m_deviceq_wr % m_qsize] = d; m_deviceq_wr++; ScopedMutex lock( &m_action_mutex ); pthread_cond_broadcast( &m_action_cond ); } else { Log<0>( _("UDEVMonitor: *** queue full, packet dropped ***\n") ); udev_device_unref( d ); } } } //}}} static void *actor_thread( void *p ) { //{{{ try { static_cast(p)->__actor_thread(); } catch( const abi::__forced_unwind& ) { Log<3>( "UDEVMonitor: actor_thread cancelled\n" ); throw; } BB_CATCH_STD( 0, _("uncaught actor_thread exception") ) return NULL; } //}}} static void *monitor_thread( void *p ) { //{{{ try { static_cast(p)->__monitor_thread(); } catch( const abi::__forced_unwind& ) { Log<3>( "UDEVMonitor: monitor_thread cancelled\n" ); throw; } BB_CATCH_STD( 0, _("uncaught monitor_thread exception") ) return NULL; } //}}} public: UDEVMonitor( unsigned vendorid = BB_VENDOR_ID, unsigned productid = BB_PRODUCT_ID ) : DevList( vendorid, productid ) , m_udev( udev_new() ) , m_mon( NULL ) , m_deviceq_rd( 0 ) , m_deviceq_wr( 0 ) { //{{{ Log<2>( "+ UDEVMonitor( %04x:%04x )\n", vendorid, productid ); if( ! m_udev ) throw Error( _("UDEVMonitor: failed to create udev context") ); try { m_mon = udev_monitor_new_from_netlink( m_udev, "udev" ); if( ! 
m_mon ) throw Error( _("UDEVMonitor: failed to create udev monitor") ); // Let the monitor socket block, it has its own thread here int monfd = udev_monitor_get_fd( m_mon ); int socketflags = fcntl( monfd, F_GETFL ); if( socketflags == -1 ) throw SystemError( _("UDEVMonitor: failed to get monitor fd flags") ); if( fcntl( monfd, F_SETFL, socketflags & ~O_NONBLOCK ) == -1 ) throw SystemError( _("UDEVMonitor: failed to clear monitor fd O_NONBLOCK") ); udev_monitor_filter_add_match_subsystem_devtype( m_mon, "usb", "usb_device" ); if( udev_monitor_enable_receiving( m_mon ) ) throw Error( _("UDEVMonitor: failed to bind udev monitor") ); struct udev_enumerate *e = udev_enumerate_new( m_udev ); struct udev_list_entry *devlist, *d; if( ! e ) throw Error( _("UDEVMonitor: failed to create enum context") ); udev_enumerate_add_match_sysattr( e, "idVendor", GetVendorIDStr().c_str() ); udev_enumerate_add_match_sysattr( e, "idProduct", GetProductIDStr().c_str() ); udev_enumerate_scan_devices( e ); devlist = udev_enumerate_get_list_entry( e ); udev_list_entry_foreach( d, devlist ) { const char *syspath = udev_list_entry_get_name( d ); struct udev_device *ud = udev_device_new_from_syspath( m_udev, syspath ); if( ! ud ) { Log<0>( _("UDEVMonitor: failed to get device from syspath '%s'\n"), syspath ); continue; } if( opt_verbose > 3 ) dump_udev_data( ud ); // Alternatively, but less reliably: //const char *serial = udev_device_get_property_value( ud, "ID_SERIAL_SHORT" ); //const char *mfg = udev_device_get_property_value( ud, "ID_VENDOR" ); //const char *product = udev_device_get_property_value( ud, "ID_MODEL" ); const char *serial = udev_device_get_sysattr_value( ud, "serial" ); const char *mfg = udev_device_get_sysattr_value( ud, "manufacturer" ); const char *product = udev_device_get_sysattr_value( ud, "product" ); const char *busnum = udev_device_get_sysattr_value( ud, "busnum" ); const char *devnum = udev_device_get_sysattr_value( ud, "devnum" ); const char *devport = udev_device_get_sysattr_value( ud, "devpath" ); const char *devpath = udev_device_get_devpath( ud ); try { if( devpath ) { Device::Handle h = new_device( GetVendorID(), GetProductID(), busnum, devnum, mfg, product, serial, devport, devpath ); if( h != NULL ) AddDevice( h ); } else { // Guard against udev being braindead Log<0>( _("UDEVMonitor: device '%s' with no devpath\n"), syspath ); } } catch( ... ) { udev_device_unref( ud ); udev_enumerate_unref( e ); throw; } udev_device_unref( ud ); } udev_enumerate_unref( e ); // Now wait in the background for things to change pthread_mutex_init( &m_action_mutex, NULL ); pthread_cond_init( &m_action_cond, NULL ); const pthread_attr_t *attr = GetDefaultThreadAttr(); int ret = pthread_create( &m_actor_thread, attr, actor_thread, this ); if( ret ) { pthread_cond_destroy( &m_action_cond ); pthread_mutex_destroy( &m_action_mutex ); throw SystemError( ret, _("UDEVMonitor: failed to create actor thread") ); } ret = pthread_create( &m_monitor_thread, attr, monitor_thread, this ); if( ret ) { pthread_cancel( m_actor_thread ); pthread_join( m_actor_thread, NULL ); pthread_cond_destroy( &m_action_cond ); pthread_mutex_destroy( &m_action_mutex ); throw SystemError( ret, _("UDEVMonitor: failed to create monitor thread") ); } } catch( ... 
) { if( m_mon ) udev_monitor_unref( m_mon ); if( m_udev ) udev_unref( m_udev ); throw; } } //}}} virtual ~UDEVMonitor() { //{{{ if( m_mon ) { Log<3>( "UDEVMonitor( %04x:%04x ): halting monitor threads\n", GetVendorID(), GetProductID() ); pthread_cancel( m_actor_thread ); pthread_cancel( m_monitor_thread ); pthread_join( m_actor_thread, NULL ); pthread_join( m_monitor_thread, NULL ); udev_monitor_unref( m_mon ); } if( m_udev ) udev_unref( m_udev ); pthread_cond_destroy( &m_action_cond ); pthread_mutex_destroy( &m_action_mutex ); Log<2>( "- UDEVMonitor( %04x:%04x )\n", GetVendorID(), GetProductID() ); } //}}} virtual bool HasHotplugSupport() const { return true; } }; //}}} typedef UDEVMonitor Devices; #else // ! HAVE_LIBUDEV class DeviceList : public DevList { //{{{ private: #if LIBUSB_SINCE(0x01000102) // Only available since 1.0.16 libusb_hotplug_callback_handle m_callbackhandle; static int LIBUSB_CALL hotplug_callback( libusb_context *ctx, libusb_device *dev, libusb_hotplug_event event, void *user_data ) { //{{{ (void)ctx; DeviceList *devlist = static_cast( user_data ); switch( event ) { case LIBUSB_HOTPLUG_EVENT_DEVICE_ARRIVED: { Device::Handle d = new Device( dev ); devlist->AddDevice( d ); break; } case LIBUSB_HOTPLUG_EVENT_DEVICE_LEFT: devlist->RemoveDevice( dev ); break; default: Log<0>( _("DeviceList( %04x:%04x ): unexpected hotplug event type %d\n"), devlist->GetVendorID(), devlist->GetProductID(), event ); } return 0; } //}}} #endif public: DeviceList( unsigned vendorid = BB_VENDOR_ID, unsigned productid = BB_PRODUCT_ID ) : DevList( vendorid, productid ) { //{{{ Log<2>( "+ DeviceList( %04x:%04x )\n", vendorid, productid ); #if LIBUSB_SINCE(0x01000102) // Only available since 1.0.16 // Ensure this isn't uninitialised if registering the callback fails, // since we'll access it in the destructor. Unfortunately we have to // rely on an implementation detail of libusb to do this, since it // doesn't give us a publicly defined 'invalid' handle to use. // // The handle IDs are integers, and as of libusb-1.0.19 at least, the // first valid handle is 1, and subsequent handles simply increment // the ID allocated, so we should be ok until integer wraparound, or // until someone changes how the handles work in that code. m_callbackhandle = 0; // FreeBSD 11 bumped LIBUSB_API_VERSION to 0x01000102, but didn't add // the libusb_has_capability() function ... so if we're here, but // don't have it, let's assume it succeeded until some other platform // explodes in flames to say otherwise. #if HAVE_LIBUSB_HAS_CAPABILITY if( libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG) ) #endif { #if defined(__FreeBSD__) && __FreeBSD__ < 13 // On FreeBSD 11, we're seeing a 4 second delay between when we // register the hotplug callback, and when it actually gets called // to enumerate any already available devices. In "normal" use, // that probably isn't a major problem, but in the case where we // are only requested to scan for available devices, that means // we won't have any to report before it returns (without also // doing a semi-random sleep to wait for these events). So until // we know whatever is causing that is fixed, on FreeBSD let's // enumerate them explictly first, and let AddDevice() deal with // weeding out the duplicates if/when we get them. // // There does seem to be some internal issue there, since we are // also seeing libusb_exit() block for 4 seconds at shutdown too. 
// // With FreeBSD 13 there is still a 4 second delay in libusb_exit // but devices are enumerated immediately so we don't need to do // this manually anymore. EnumerateDevices( vendorid, productid ); #endif ScopedCancelState cancelstate; int ret = libusb_hotplug_register_callback( GetContext(), libusb_hotplug_event( LIBUSB_HOTPLUG_EVENT_DEVICE_LEFT | LIBUSB_HOTPLUG_EVENT_DEVICE_ARRIVED ), LIBUSB_HOTPLUG_ENUMERATE, vendorid, productid, LIBUSB_HOTPLUG_MATCH_ANY, // device class hotplug_callback, this, &m_callbackhandle ); if( ret ) LogUSBError<0>( ret, _("DeviceList( %04x:%04x ): failed to register hotplug callback"), vendorid, productid ); else return; } #endif Log<2>( _("DeviceList: hotplug support not available\n") ); // Scan for devices manually if we don't have hotplug support, // or if trying to enable it failed. EnumerateDevices( vendorid, productid ); } //}}} ~DeviceList() { //{{{ Log<2>( "- DeviceList( %04x:%04x )\n", GetVendorID(), GetProductID() ); #if LIBUSB_SINCE(0x01000102) // Only available since 1.0.16 ScopedCancelState cancelstate; libusb_hotplug_deregister_callback( GetContext(), m_callbackhandle ); #endif } //}}} virtual bool HasHotplugSupport() const { //{{{ #if LIBUSB_SINCE(0x01000102) // Only available since 1.0.16 // FreeBSD 11 bumped LIBUSB_API_VERSION to 0x01000102, but didn't add // the libusb_has_capability() function ... so if we're here, but // don't have it, let's assume it succeeded until some other platform // exploded in flames to say otherwise. #if HAVE_LIBUSB_HAS_CAPABILITY return libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG) ? true : false; #else return true; #endif #else return false; #endif } //}}} }; //}}} typedef DeviceList Devices; #endif // HAVE_LIBUDEV } // BitB namespace #endif // _BB_SECRET_SOURCE_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/signals.h0000644000000000000000000001430314136173163016024 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2012 - 2016, Ron #ifndef _BB_SIGNALS_H #define _BB_SIGNALS_H #if EM_PLATFORM_POSIX #include #include #ifdef _REENTRANT #include #endif // This is a kludge for proto-POSIX systems like MacOSX, which still don't // define SIGRTMIN (FreeBSD added it in version 7). It means that we can't // safely use SIGUSR2 for anything else here, but we'll worry about that if // we do ever explicitly need or want it for some other purpose later. #if !HAVE_DECL_SIGRTMIN #define SIGRTMIN SIGUSR2 #endif namespace BitB { // There is no particular reason for these to be static inline, aside from // the fact that they'll mostly only ever be used in just one place in any // given application, so bundling them off into a separate impl file is a // touch on the overkill side right now. If we later package all this up // into a convenience library for applications to use that's probably what // we should do with them though. static inline void BlockSignals( int sig1 = 0, int sig2 = 0, int sig3 = 0, int sig4 = 0, int sig5 = 0, int sig6 = 0 ) { //{{{ sigset_t sigs; if( sig1 == 0 ) { if( sigfillset( &sigs ) == -1 ) throw SystemError( "BlockSignals: failed to fill signal set" ); // SIGKILL, SIGSTOP, SIGCONT and SIGABRT are silently ignored and not blocked. 
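// For illustration, a minimal sketch of how BlockSignals() and SigWait()
// (defined later in this header) are typically combined in a threaded
// daemon: block everything in the initial thread before any workers are
// created, so they all inherit the mask, then park the main thread until
// asked to terminate.  The start_worker_threads() and reload_config()
// calls here are purely hypothetical application code:
//
//      BitB::BlockSignals();               // block all blockable signals
//      start_worker_threads();
//      for(;;)
//      {
//          int sig = BitB::SigWait( SIGINT, SIGTERM, SIGHUP );
//
//          if( sig == SIGHUP )
//          {
//              reload_config();
//              continue;
//          }
//          break;                          // SIGINT or SIGTERM: shut down
//      }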
if( sigdelset( &sigs, SIGBUS ) == -1 || sigdelset( &sigs, SIGFPE ) == -1 || sigdelset( &sigs, SIGILL ) == -1 || sigdelset( &sigs, SIGSEGV ) == -1 || sigdelset( &sigs, SIGTRAP ) == -1 ) throw SystemError( "BlockSignals: failed to configure signal set" ); } else { int s[] = { sig1, sig2, sig3, sig4, sig5, sig6, 0 }; if( sigemptyset( &sigs ) == -1 ) throw SystemError( "BlockSignals: failed to clear signal set" ); for( unsigned i = 0; s[i]; ++i ) if( sigaddset( &sigs, s[i] ) == -1 ) throw SystemError( "BlockSignals: failed to add signal %d (%s)", s[i], strsignal(s[i]) ); } #ifdef _REENTRANT int ret = pthread_sigmask( SIG_BLOCK, &sigs, NULL ); if( ret ) throw SystemError( ret, "BlockSignals: failed to mask signals" ); #else if( sigprocmask( SIG_BLOCK, &sigs, NULL ) == -1 ) throw SystemError( "BlockSignals: failed to mask signals" ); #endif } //}}} static inline int FindUnblockedSignal( int sig1 = 0, int sig2 = 0, int sig3 = 0, int sig4 = 0, int sig5 = 0, int sig6 = 0, int sig7 = 0, int sig8 = 0, int sig9 = 0 ) { //{{{ sigset_t sigs; if( sigemptyset( &sigs ) == -1 ) throw SystemError( "FindUnblockedSignal: failed to clear signal set" ); #ifdef _REENTRANT int ret = pthread_sigmask( 0, NULL, &sigs ); if( ret ) throw SystemError( ret, "FindUnblockedSignal: failed to read signal mask" ); #else if( sigprocmask( 0, NULL, &sigs ) == -1 ) throw SystemError( "FindUnblockedSignal: failed to read signal mask" ); #endif int sarg[] = { sig1, sig2, sig3, sig4, sig5, sig6, sig7, sig8, sig9, 0 }; int sall[] = { SIGHUP, SIGINT, SIGQUIT, SIGUSR1, SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGCHLD, SIGTSTP, SIGTTIN, SIGTTOU, SIGURG, SIGXCPU, SIGXFSZ, SIGVTALRM, SIGPROF, SIGWINCH, SIGIO, SIGSYS, #ifdef SIGSTKFLT SIGSTKFLT, #endif #ifdef SIGPWR SIGPWR, #endif #ifdef SIGEMT SIGEMT, #endif #ifdef SIGINFO SIGINFO, #endif 0 }; int *s = sig1 ? sarg : sall; for( unsigned i = 0; s[i]; ++i ) { int result = sigismember( &sigs, s[i] ); if( result == -1 ) throw SystemError( "FindUnblockedSignal: failed to test mask for signal %d (%s)", s[i], strsignal(s[i]) ); if( result == 0 ) return s[i]; } return 0; } //}}} static inline int SigWait( int sig1 = 0, int sig2 = 0, int sig3 = 0, int sig4 = 0, int sig5 = 0, int sig6 = 0, int sig7 = 0, int sig8 = 0, int sig9 = 0 ) { //{{{ sigset_t signals; if( sig1 == 0 ) { int u = FindUnblockedSignal(); if( u ) throw Error( "SigWait: signal %d (%s) is not blocked", u, strsignal(u) ); if( sigfillset( &signals ) == -1 ) throw SystemError( "SigWait: failed to fill signal set" ); } else { int s[] = { sig1, sig2, sig3, sig4, sig5, sig6, sig7, sig8, sig9, 0 }; int u = FindUnblockedSignal( sig1, sig2, sig3, sig4, sig5, sig6, sig7, sig8, sig9 ); if( u ) throw Error( "SigWait: signal %d (%s) is not blocked", u, strsignal(u) ); if( sigemptyset( &signals ) == -1 ) throw SystemError( "SigWait: failed to clear signal set" ); for( unsigned i = 0; s[i]; ++i ) if( sigaddset( &signals, s[i] ) == -1 ) throw SystemError( "SigWait: failed to add signal %d (%s)", s[i], strsignal(s[i]) ); } int sig; int ret = sigwait( &signals, &sig ); if( ret ) throw SystemError( ret, "SigWait: error" ); return sig; } //}}} } #endif // EM_PLATFORM_POSIX #endif // _BB_SIGNALS_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/socket-source.h0000644000000000000000000001422714136173163017157 0ustar // This file is distributed as part of the bit-babbler package. 
// Copyright 2015 - 2018, Ron #ifndef _BB_SOCKET_SOURCE_H #define _BB_SOCKET_SOURCE_H #include #include namespace BitB { class SocketSource : public RefCounted { //{{{ private: #if EM_PLATFORM_MSW WinsockScope m_winsock; #endif Pool::Handle m_pool; SockAddr m_sa; int m_fd; pthread_t m_serverthread; BB_NORETURN void do_server_thread() { //{{{ SetThreadName( "UDP out" ); std::string addr = m_sa.AddrStr(); Log<3>( "SocketSource( %s ): begin server_thread\n", addr.c_str() ); const size_t MAX_BYTES = 32768; union { uint16_t len; char buf[8]; }; uint8_t rbuf[MAX_BYTES]; sockaddr_any_t peeraddr; HealthMonitor qa( "UDP" ); for(;;) { socklen_t peeraddrlen = sizeof( peeraddr.ss ); ssize_t n = recvfrom( m_fd, buf, sizeof(buf), 0, &peeraddr.any, &peeraddrlen ); if( n == 2 ) { size_t bytes = ntohs(len); size_t r; Log<5>( "SocketSource( %s ): request for %zu bytes\n", addr.c_str(), bytes ); if( bytes < 1 || bytes > MAX_BYTES) { Log<2>( _("SocketSource( %s ): ignoring %zd byte request\n"), addr.c_str(), bytes ); continue; } do { r = m_pool->read( rbuf, bytes ); } while( ! qa.Check( rbuf, r ) ); Log<5>( "SocketSource( %s ): returning %zu bytes\n", addr.c_str(), r ); #if EM_PLATFORM_MSW n = sendto( m_fd, reinterpret_cast(rbuf), r, 0, &peeraddr.any, peeraddrlen ); #else n = sendto( m_fd, rbuf, r, 0, &peeraddr.any, peeraddrlen ); #endif if( n == -1 ) LogSocketErr<1>( _("SocketSource( %s ): sendto failed"), addr.c_str() ); else if( size_t(n) != r ) Log<2>( _("SocketSource( %s ): only %zd of %zu bytes sent\n"), addr.c_str(), n, r ); } else if( n == -1 ) LogSocketErr<1>( _("SocketSource( %s ): recvfrom failed"), addr.c_str() ); else Log<2>( _("SocketSource( %s ): ignoring %zd byte message\n"), addr.c_str(), n ); } } //}}} static void *server_thread( void *p ) { //{{{ SocketSource *s = static_cast( p ); try { s->do_server_thread(); } catch( const abi::__forced_unwind& ) { Log<3>( "SocketSource( '%s' ): server_thread cancelled\n", s->m_sa.AddrStr().c_str() ); throw; } BB_CATCH_STD( 0, _("uncaught SocketSource::server_thread exception") ) return NULL; } //}}} void Close() { //{{{ #if EM_PLATFORM_MSW closesocket( m_fd ); #else close( m_fd ); #endif } //}}} public: typedef RefPtr< SocketSource > Handle; SocketSource( const Pool::Handle &pool, const std::string &addr, bool freebind = false ) : m_pool( pool ) , m_sa( addr ) { //{{{ Log<2>( "+ SocketSource( '%s' )\n", addr.c_str() ); m_sa.GetAddrInfo( SOCK_DGRAM, AI_ADDRCONFIG | AI_PASSIVE ); if( m_sa.addr.any.sa_family != AF_INET && m_sa.addr.any.sa_family != AF_INET6 ) throw Error( _("SocketSource( %s ): not an IPv4 or IPv6 address (family %u)"), addr.c_str(), m_sa.addr.any.sa_family ); m_fd = socket( m_sa.addr.any.sa_family, m_sa.addr_type, m_sa.addr_protocol ); if( m_fd == -1 ) throw SocketError( _("SocketSource( %s ): failed to open socket"), addr.c_str() ); try { if( freebind ) EnableFreebind( m_fd, stringprintf("SocketSource( %s )", addr.c_str()) ); if( bind( m_fd, &m_sa.addr.any, m_sa.addr_len ) == -1 ) throw SocketError( _("SocketSource( %s ): bind failed"), addr.c_str() ); int ret = pthread_create( &m_serverthread, GetDefaultThreadAttr(), server_thread, this ); if( ret ) throw SystemError( ret, _("SocketSource( %s ): failed to create server thread"), addr.c_str() ); } catch( ... 
) { Close(); throw; } } //}}} ~SocketSource() { //{{{ std::string addr = m_sa.AddrStr(); Log<2>( _("- SocketSource( %s )\n"), addr.c_str() ); Log<3>( _("SocketSource( %s ): terminating server\n"), addr.c_str() ); pthread_cancel( m_serverthread ); Log<3>( _("SocketSource( %s ): waiting for server termination\n"), addr.c_str() ); pthread_join( m_serverthread, NULL ); Close(); } //}}} }; //}}} } // BitB namespace #endif // _BB_SOCKET_SOURCE_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/socket.h0000644000000000000000000002702014136173163015654 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 1998 - 2018, Ron #ifndef _BB_SOCKET_H #define _BB_SOCKET_H #include #if EM_PLATFORM_POSIX #include #include #include #include #include #elif EM_PLATFORM_MSW #include #else #error Unsupported platform #endif namespace BitB { union sockaddr_any_t { struct sockaddr any; // Generic socket address. struct sockaddr_storage ss; // Largest available socket address space. struct sockaddr_in in; // IPv4 domain socket address. struct sockaddr_in6 in6; // IPv6 domain socket address. #if EM_PLATFORM_POSIX struct sockaddr_un un; // Unix domain socket address. #endif }; struct SockAddr { //{{{ std::string host; std::string service; int addr_type; int addr_protocol; socklen_t addr_len; sockaddr_any_t addr; // Parse an address string of the form 'host:service' // where the host part (but not the colon) is optional. // INADDR_ANY is assumed if no host is provided. SockAddr( const std::string &addrstr ) { //{{{ size_t n = addrstr.rfind(':'); if( n != std::string::npos && n + 1 < addrstr.size() ) { service = addrstr.substr( n + 1 ); if( addrstr[0] == '[' && n > 2 ) host = addrstr.substr( 1, n - 2 ); else host = addrstr.substr( 0, n ); } if( service.empty() ) throw Error( _("SockAddr( '%s' ): no service address"), addrstr.c_str() ); } //}}} std::string AddrStr() const { //{{{ if( host.find(':') != std::string::npos ) return '[' + host + "]:" + service; return host + ':' + service; } //}}} void GetAddrInfo( int socktype, int flags ) { //{{{ addrinfo hints; addrinfo *addrinf; memset( &hints, 0, sizeof(addrinfo) ); hints.ai_flags = flags; hints.ai_family = AF_UNSPEC; hints.ai_socktype = socktype; // hints.ai_protocol = 0; // hints.ai_addrlen = 0; // hints.ai_addr = NULL; // hints.ai_canonname = NULL; // hints.ai_next = NULL; int err = ::getaddrinfo( host.empty() ? 
NULL : host.c_str(), service.c_str(), &hints, &addrinf ); if( err ) throw Error( _("SockAddr( '%s' ): failed to get address: %s"), AddrStr().c_str(), gai_strerror( err ) ); if( addrinf->ai_addrlen > sizeof(sockaddr_storage) ) { freeaddrinfo( addrinf ); throw Error( _("SockAddr( '%s' ): ai_addrlen %ju > sockaddr_storage %zu"), AddrStr().c_str(), uintmax_t(addrinf->ai_addrlen), sizeof(sockaddr_storage) ); } addr_type = addrinf->ai_socktype; addr_protocol = addrinf->ai_protocol; addr_len = addrinf->ai_addrlen; memcpy( &addr.any, addrinf->ai_addr, addr_len ); memset( reinterpret_cast(&addr.any) + addr_len, 0, sizeof(sockaddr_storage) - addr_len ); freeaddrinfo( addrinf ); } //}}} }; //}}} #if EM_PLATFORM_MSW class SocketError : public Error { //{{{ private: int m_errno; char m_errmsg[65536]; char *GetSysMsg() { m_errmsg[0] = '\0'; FormatMessageA( FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, 0, m_errno, 0, m_errmsg, sizeof(m_errmsg), NULL ); return m_errmsg; } public: SocketError() throw() : m_errno( WSAGetLastError() ) { SetMessage( "Socket Error: %s", GetSysMsg() ); } SocketError( const std::string &msg ) throw() : m_errno( WSAGetLastError() ) { SetMessage( "%s", (msg + ": " + GetSysMsg()).c_str() ); } BB_PRINTF_FORMAT(2,3) SocketError( const char *format, ... ) throw() : m_errno( WSAGetLastError() ) { va_list arglist; va_start( arglist, format ); SetMessage( format, arglist ); va_end( arglist ); AppendMessage( std::string(": ") + GetSysMsg() ); } BB_PRINTF_FORMAT(3,4) SocketError( int code, const char *format, ... ) throw() : m_errno( code ) { va_list arglist; va_start( arglist, format ); SetMessage( format, arglist ); va_end( arglist ); AppendMessage( std::string(": ") + GetSysMsg() ); } int GetErrorCode() const { return m_errno; } }; //}}} template< int N > BB_PRINTF_FORMAT(1,2) void LogSocketErr( const char *format, ... ) { //{{{ char errmsg[65536]; int errnum = WSAGetLastError(); va_list arglist; va_start( arglist, format ); std::string msg = vstringprintf( format, arglist ); if( msg.size() && msg[msg.size() - 1] == '\n' ) msg.erase( msg.size() - 1 ); errmsg[0] = '\0'; FormatMessageA( FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, 0, errnum, 0, errmsg, sizeof(errmsg), NULL ); Log( "%s: %s\n", msg.c_str(), errmsg ); va_end( arglist ); } //}}} class WinsockScope { //{{{ private: WSADATA m_wsa; public: WinsockScope() { int ret = WSAStartup(MAKEWORD(2,2), &m_wsa); if( ret ) throw Error( "WSAStartup failed with error %d", ret ); } ~WinsockScope() { WSACleanup(); } }; //}}} #else // ! EM_PLATFORM_MSW #define SocketError SystemError #define LogSocketErr LogErr struct WinsockScope { WinsockScope() {} }; #endif static inline void EnableFreebind( int fd, const std::string &where = std::string() ) { //{{{ #ifdef IP_FREEBIND int i = 1; if( setsockopt( fd, IPPROTO_IP, IP_FREEBIND, &i, sizeof(i) ) == -1 ) throw SocketError( _("%s: Failed to set IP_FREEBIND"), where.c_str() ); #elif defined(IP_BINDANY) // FreeBSD variant, requires PRIV_NETINET_BINDANY privilege to enable. int i = 1; if( setsockopt( fd, IPPROTO_IP, IP_BINDANY, &i, sizeof(i) ) == -1 ) throw SocketError( _("%s: Failed to set IP_FREEBIND (IP_BINDANY)"), where.c_str() ); #elif defined(SO_BINDANY) // OpenBSD variant, requires superuser privilege to enable. 
int i = 1; if( setsockopt( fd, SOL_SOCKET, SO_BINDANY, &i, sizeof(i) ) == -1 ) throw SocketError( _("%s: Failed to set IP_FREEBIND (SO_BINDANY)"), where.c_str() ); #else (void)fd; Log<0>( _("%s: IP_FREEBIND is not supported on this platform\n"), where.c_str() ); #endif } //}}} // Check if systemd is expecting us to acknowledge it. //{{{ // The actually documented guarantees here leave something to be desired, // but if NOTIFY_SOCKET is set in the environment, and contains either an // absolute path, or a string begining with an '@', then it is probably // systemd indicating that it wants notification sent to either a named // unix domain socket or an abstract socket, respectively. The actual // address of the abstract socket is obtained by replacing the @ with a // null character. Though that seems academic, because in practice it // appears to always use a named socket. If something else sets this, // then the caller who did that gets to keep all the pieces ... // // If this returns a non-empty string, the above conditions have been met. //}}} static inline std::string GetSystemdNotifySocket() { //{{{ char *s = getenv("NOTIFY_SOCKET"); if( ! s || (s[0] != '@' && s[0] != '/') || s[1] == '\0' ) return std::string(); return s; } //}}} // Send a notification message to systemd. //{{{ // This will do nothing if the NOTIFY_SOCKET was not set, otherwise it will // try to send the given message to the indicated address and throw if we // aren't able to do that. Since systemd doesn't actually acknowledge our // acknowledgement, there's no way to know if this actually did anything // aside from squirting a datagram out into the void. If it really was // systemd expecting something from us, then it will terminate this process // if it doesn't get a READY message before its timeout expires. It will // also reject the message if the sender's SCM_CREDENTIALS are not included // in the packet sent, but sendto(2) will include those for us, without // needing to bloat the code here with some useless Trying To Look Clever, // and then needing to guard most of that to try and keep it all portable. //}}} static inline void SystemdNotify( const std::string &msg, const std::string &ns = GetSystemdNotifySocket() ) { //{{{ #if EM_PLATFORM_POSIX if( ns.empty() ) return; sockaddr_any_t addr; socklen_t addrlen = socklen_t(offsetof(sockaddr_un, sun_path) + ns.size()); if( ns.size() >= sizeof(addr.un.sun_path) ) throw Error( _("SystemdNotify: socket path '%s' is too long. " "Maximum length is %zu bytes."), ns.c_str(), sizeof(addr.un.sun_path) - 1 ); addr.un.sun_family = AF_UNIX; ns.copy( addr.un.sun_path, sizeof(addr.un.sun_path) - 1 ); addr.un.sun_path[ ns.size() ] = '\0'; // Systemd passes abstract socket addresses with an initial '@', // but Linux identifies them by using a null as the first byte. 
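// For illustration, from the caller's side the usual pattern under a
// systemd unit with Type=notify is simply to send the standard
// sd_notify(3) state strings at the appropriate times, relying on
// NOTIFY_SOCKET being picked up from the environment by default:
//
//      BitB::SystemdNotify( "READY=1" );       // once initialisation is complete
//      ...
//      BitB::SystemdNotify( "STOPPING=1" );    // before beginning shutdown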
if( ns[0] == '@' ) addr.un.sun_path[0] = '\0'; int fd = socket( AF_UNIX, SOCK_DGRAM, 0 ); if( fd == -1 ) throw SocketError( _("SystemdNotify( %s, %s ): failed to create socket"), msg.c_str(), ns.c_str() ); ssize_t n = sendto( fd, msg.c_str(), msg.size(), 0, &addr.any, addrlen ); close(fd); if( n < 0 ) throw SocketError( _("SystemdNotify( %s, %s ): failed to send message"), msg.c_str(), ns.c_str() ); if( size_t(n) < msg.size() ) throw Error( _("SystemdNotify( %s, %s ): failed to send entire message" " (only %zd/%zu bytes)"), msg.c_str(), ns.c_str(), n, msg.size() ); #else (void)msg; (void)ns; #endif } //}}} } // BitB namespace #endif // _BB_SOCKET_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/term_escape.h0000644000000000000000000000403014136173163016647 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2014 - 2015, Ron #ifndef _BB_TERM_ESCAPE_H #define _BB_TERM_ESCAPE_H #define BLACK "\x1b[0;30m" #define RED "\x1b[0;31m" #define GREEN "\x1b[0;32m" #define YELLOW "\x1b[0;33m" #define BLUE "\x1b[0;34m" #define PURPLE "\x1b[0;35m" #define CYAN "\x1b[0;36m" #define WHITE "\x1b[0;37m" #define BOLD "\x1b[1m" #define BOLD_BLACK "\x1b[1;30m" #define BOLD_RED "\x1b[1;31m" #define BOLD_GREEN "\x1b[1;32m" #define BOLD_YELLOW "\x1b[1;33m" #define BOLD_BLUE "\x1b[1;34m" #define BOLD_PURPLE "\x1b[1;35m" #define BOLD_CYAN "\x1b[1;36m" #define BOLD_WHITE "\x1b[1;37m" #define MID_GREEN "\x1b[38;5;40m" #define MID_YELLOW "\x1b[38;5;227m" #define MID_ORANGE "\x1b[38;5;214m" #define DARK_RED "\x1b[38;5;88m" #define END_COLOUR "\x1b[0m" #define COLOUR_STR(c,s) c s END_COLOUR #define BLACK_STR(s) COLOUR_STR(BLACK,s) #define RED_STR(s) COLOUR_STR(RED,s) #define GREEN_STR(s) COLOUR_STR(GREEN,s) #define YELLOW_STR(s) COLOUR_STR(YELLOW,s) #define BLUE_STR(s) COLOUR_STR(BLUE,s) #define PURPLE_STR(s) COLOUR_STR(PURPLE,s) #define CYAN_STR(s) COLOUR_STR(CYAN,s) #define WHITE_STR(s) COLOUR_STR(WHITE,s) #define BOLD_BLACK_STR(s) COLOUR_STR(BOLD_BLACK,s) #define BOLD_RED_STR(s) COLOUR_STR(BOLD_RED,s) #define BOLD_GREEN_STR(s) COLOUR_STR(BOLD_GREEN,s) #define BOLD_YELLOW_STR(s) COLOUR_STR(BOLD_YELLOW,s) #define BOLD_BLUE_STR(s) COLOUR_STR(BOLD_BLUE,s) #define BOLD_PURPLE_STR(s) COLOUR_STR(BOLD_PURPLE,s) #define BOLD_CYAN_STR(s) COLOUR_STR(BOLD_CYAN,s) #define BOLD_WHITE_STR(s) COLOUR_STR(BOLD_WHITE,s) #define COLOUR_STR_IF(cond,col,s) (cond) ? col s END_COLOUR : s #endif // _BB_TERM_ESCAPE_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/unicode.h0000644000000000000000000001204214136173163016010 0ustar //////////////////////////////////////////////////////////////////// // //! @file unicode.h //! @ingroup UTFhelpers //! @brief Functions for handling Unicode. // // Copyright 2013 - 2016, Ron // This file is distributed as part of the bit-babbler package. // //////////////////////////////////////////////////////////////////// #ifndef _BB_UNICODE_H #define _BB_UNICODE_H #include #include // We could jump through some extra hoops here to support insane systems // but since they should really just die in a fire, and since the whole // point of the code here is to avoid having to use heavy machinery like // iconv for the simple conversions, just bark at them for now ... // // This is only really a problem for the wstring functions at present, // if we ever come to depend on C++11, then we could use u32string and // the char32_t types which are explicitly UTF-32, and/or the u16string // and char16_t types which are explicitly UTF-16. 
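// For reference, the encoding boundaries implemented by AppendAsUTF8()
// below are the standard UTF-8 ones:
//
//      U+0000  .. U+007F    ->  1 octet    0xxxxxxx
//      U+0080  .. U+07FF    ->  2 octets   110xxxxx 10xxxxxx
//      U+0800  .. U+FFFF    ->  3 octets   1110xxxx 10xxxxxx 10xxxxxx
//      U+10000 .. U+10FFFF  ->  4 octets   11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
//
// so, for example, U+00E9 is appended as 0xC3 0xA9, and U+1F600 as
// 0xF0 0x9F 0x98 0x80.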
//#define USE_APPEND_WSTRING_AS_UTF8 #ifdef USE_APPEND_WSTRING_AS_UTF8 // Since we don't actually need the wstring version of AppendAsUTF8 in // the BitBabbler code anywhere, just disable it for windows users. #ifndef __STDC_ISO_10646__ #error "The wchar_t type on this system is not UTF-32. You're hosed." #endif #endif namespace BitB { //! @defgroup UTFhelpers Unicode support //! @brief Miscellaneous functions for handling Unicode. //! @ingroup Strings //!@{ //! @name UTF-8 conversion //@{ //{{{ //! Append a UTF-32 codepoint as UTF-8 octets to the string @a s //{{{ //! If the codepoint is outside the valid UTF-32 range (greater than //! 0x10FFFF) then it will simply be ignored. //}}} static inline void AppendAsUTF8( std::string &s, uint32_t codepoint ) { //{{{ if( codepoint < 0x80 ) { s.push_back( char(codepoint) ); } else if( codepoint < 0x0800 ) { s.push_back( char(0xc0 | (codepoint >> 6)) ); s.push_back( char(0x80 | (codepoint & 0x3f)) ); } else if( codepoint < 0x10000 ) { s.push_back( char(0xe0 | (codepoint >> 12)) ); s.push_back( char(0x80 | ((codepoint >> 6) & 0x3f)) ); s.push_back( char(0x80 | (codepoint & 0x3f)) ); } else if( codepoint < 0x110000 ) { s.push_back( char(0xf0 | (codepoint >> 18)) ); s.push_back( char(0x80 | ((codepoint >> 12) & 0x3f)) ); s.push_back( char(0x80 | ((codepoint >> 6) & 0x3f)) ); s.push_back( char(0x80 | (codepoint & 0x3f)) ); } } //}}} #ifdef USE_APPEND_WSTRING_AS_UTF8 //! Append a UTF-32 string as UTF-8 octets to the string @a dest static inline const std::string &AppendAsUTF8( std::string &dest, const std::wstring &src ) { //{{{ for( std::wstring::const_iterator i = src.begin(), e = src.end(); i != e; ++i ) AppendAsUTF8( dest, uint32_t(*i) ); return dest; } //}}} #endif //@} //}}} //! @name UTF-16 surrogate pair support //@{ //{{{ //! Return @c true if the 16 bit @a value is a valid UTF-16 leading surrogate static inline bool IsUTF16LeadingSurrogate( uint16_t value ) { return (value & 0xfc00) == 0xd800; } //! Return @c true if the 16 bit @a value is a valid UTF-16 trailing surrogate static inline bool IsUTF16TrailingSurrogate( uint16_t value ) { return (value & 0xfc00) == 0xdc00; } //! Convert a UTF-16 surrogate pair to a UTF-32 codepoint //{{{ //! @param lead The leading surrogate //! @param trail The trailing surrogate //! //! @return The UTF-32 codepoint //! //! @note This function does nothing to validate the surrogate pair. //! It assumes the caller has already done that. // // http://www.unicode.org/faq/utf_bom.html#utf16-4 //}}} static inline uint32_t UTF16SurrogateToUTF32( uint16_t lead, uint16_t trail ) { const uint32_t surrogate_offset = uint32_t(0x10000 - (0xD800u << 10) - 0xDC00); return (uint32_t(lead) << 10) + trail + surrogate_offset; } //! Convert a UTF-32 codepoint to a UTF-16 surrogate pair //{{{ //! @param codepoint The UTF-32 codepoint to convert //! @param lead The 16 bit leading surrogate value //! @param trail The 16 bit trailing surrogate value //! //! @note This function does nothing to validate if the @a codepoint needs //! to be encoded as a surrogate pair. It assumes the caller has //! already determined the codepoint is greater than 0xFFFF. 
//}}} static inline void UTF32toUTF16Surrogate( uint32_t codepoint, uint16_t &lead, uint16_t &trail ) { const uint16_t lead_offset = 0xD800 - (0x10000u >> 10); lead = uint16_t(lead_offset + (codepoint >> 10)); trail = 0xDC00 + (codepoint & 0x3FF); } //@} //}}} //!@} } // BitB namespace #endif // _BB_UNICODE_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/unordered_map.h0000644000000000000000000000136414136173163017213 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2017, Ron #ifndef _BB_UNORDERED_MAP_H #define _BB_UNORDERED_MAP_H // Hide the implementation detail of whether we have std::unordered_map // or std::tr1::unordered_map available on the current platform. // Code should #include and use the alias // namespace was_tr1::unordered_map wherever needed. #if HAVE_UNORDERED_MAP #include namespace was_tr1 = std; #elif HAVE_TR1_UNORDERED_MAP #include namespace was_tr1 = std::tr1; #else #error "No unordered map type supported on this platform" #endif #endif // _BB_UNORDERED_MAP_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/usbcontext.h0000644000000000000000000020426714136173163016574 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2010 - 2018, Ron #ifndef _BB_USBCONTEXT_H #define _BB_USBCONTEXT_H #include #if EM_PLATFORM_MSW // We don't actually need this (socket support) here, but the crazy windows API // requires winsock2.h to be included before windows.h, and the libusb.h header // includes the latter - not because it actually needs it, but because they do // a horrible hack to avoid horrible windows redefining the word 'interface' ... #include #endif #include LIBUSB_HEADER #if EM_PLATFORM_LINUX #include // for reading device capabilities #include #include #endif #include #include #include #include #include #ifdef LIBUSBX_API_VERSION #define LIBUSB_SINCE(x) ((LIBUSBX_API_VERSION) >= (x)) #elif defined(LIBUSB_API_VERSION) #define LIBUSB_SINCE(x) ((LIBUSB_API_VERSION) >= (x)) #else #define LIBUSB_SINCE(x) 0 #endif // We can't use LIBUSB_API_VERSION to test for this, because the version wasn't // changed when this was added - so it's back to the tried and tested method of // testing for features in autoconf. It is a bit fast and loose for the other // things we are currently wrapping with it too - but at least those will just // hide some functions that might otherwise be present and not break the build // like this one did on Wheezy with 1.0.11 #if ! HAVE_LIBUSB_STRERROR static inline const char * LIBUSB_CALL libusb_strerror(enum libusb_error errcode) { return libusb_error_name(errcode); } #endif namespace BitB { // Exception class for errors from libusb class USBError : public Error { //{{{ private: libusb_error m_usberr; public: BB_PRINTF_FORMAT(3,4) USBError( int code, const char *format, ... ) throw() : m_usberr( libusb_error(code) ) { va_list arglist; va_start( arglist, format ); SetMessage( vstringprintf( format, arglist ) + ": " + libusb_strerror(m_usberr) ); va_end( arglist ); } libusb_error GetErrorCode() const { return m_usberr; } }; //}}} // Manage a libusb context and the devices associated with it class USBContext { //{{{ private: USBContext( const USBContext& ); USBContext &operator=( const USBContext& ); public: // A USB vendor and product ID identifier //{{{ // This can be passed as a string of the form VVVV:PPPP, where V and P // are the hexadecimal vendor and product IDs respectively. 
Either of // the parts may be omitted or set to 0, in which case it will signify // that part should match any ID (but the ':' must always be included). //}}} struct ProductID { //{{{ private: void parse_id_string( const std::string &id ) { //{{{ // Leave it with an 'invalid' ID if this fails. vid = 0xFFFF; pid = 0xFFFF; size_t n = id.find(':'); if( n == std::string::npos || id.size() > 9 ) throw Error( _("Invalid product ID '%s'"), id.c_str() ); try { if( n == 0 ) vid = 0; else if( (vid = StrToU( id.substr(0, n), 16 )) > 0xFFFF ) throw 1; } catch( const abi::__forced_unwind& ) { throw; } catch( ... ) { throw Error( _("ProductID: invalid vendor ID '%s'"), id.c_str() ); } try { if( n + 1 == id.size() ) pid = 0; else if( (pid = StrToU( id.substr(n + 1), 16 )) > 0xFFFF ) throw 1; } catch( const abi::__forced_unwind& ) { throw; } catch( ... ) { throw Error( _("ProductID: invalid product ID '%s'"), id.c_str() ); } } //}}} public: typedef std::list< ProductID > List; unsigned vid; // Vendor ID unsigned pid; // Product ID ProductID() : vid( 0 ) , pid( 0 ) {} ProductID( unsigned vendor, unsigned product ) : vid( vendor ) , pid( product ) {} ProductID( const std::string &id ) { parse_id_string( id ); } ProductID &operator=( const std::string &id ) { parse_id_string( id ); return *this; } std::string Str() const { return stringprintf( "%04x:%04x", vid, pid ); } }; //}}} // A reference to an individual device struct Device : public RefCounted { //{{{ public: typedef RefPtr< Device > Handle; typedef std::list< Handle > List; // An identifier that can uniquely indicate an individual device, //{{{ // This can be either its serial number, its bus and device number, // or its bus and device port. // // A device port string is of the form B-P[.P ...] // Where B is the bus number and P is the dot separated string of // port numbers which follow the topology to the desired device. // // A device number indication is of the form [B:]N // Where B is the bus number and N is the device address on the bus. // If the bus part is omitted the ID may not be unique, and the // device which will be identified if it is not is unspecified (it // will probably be the first match found, but that may not be the // same device everywhere this is used). // // The bus number and device address must be decimal integers. // // A serial number is an arbitrary string consisting of upper case // letter and numbers. It will not contain either a '-' or ':' and // must not be a number < 127 (which ensures it cannot be mistaken // for a device address). It must be at least 4 characters long, // (since in practice the current minimum is always 6). //}}} struct ID { //{{{ private: void parse_id_string( const std::string &id ) { //{{{ // busnum can be 0, at least on some platforms like FreeBSD, // but devnum should never be since that is the 'global' // address used during enumeration before one is assigned. // Is the id string a device port? size_t n = id.find('-'); if( n != std::string::npos ) { try { if( (busnum = StrToU( id.substr(0, n), 10 )) > 127 ) throw 1; } catch( const abi::__forced_unwind& ) { throw; } catch( ... ) { throw Error( _("Device::ID: invalid bus number '%s'"), id.c_str() ); } if( n + 1 >= id.size() ) throw Error( _("Device::ID: invalid device port '%s'"), id.c_str() ); devport = id.substr( n + 1 ); return; } // Is it a bus and device address? n = id.find(':'); if( n != std::string::npos ) { try { if( (busnum = StrToU( id.substr(0, n), 10 )) > 127 ) throw 1; } catch( const abi::__forced_unwind& ) { throw; } catch( ... 
) { throw Error( _("Device::ID: invalid bus number '%s'"), id.c_str() ); } if( n + 1 >= id.size() ) throw Error( _("Device::ID: invalid device address '%s'"), id.c_str() ); try { devnum = StrToU( id.substr(n + 1), 10 ); if( devnum < 1 || devnum > 127 ) throw 1; } catch( const abi::__forced_unwind& ) { throw; } catch( ... ) { throw Error( _("Device::ID: invalid device address '%s'"), id.c_str() ); } return; } // Is it a device address without a bus number? if( id.size() < 4 ) { try { devnum = StrToU( id, 10 ); if( devnum < 1 || devnum > 127 ) throw 1; } catch( const abi::__forced_unwind& ) { throw; } catch( ... ) { throw Error( _("Device::ID: invalid device address '%s'"), id.c_str() ); } return; } // Consider it to be a serial number then. serial = id; } //}}} public: typedef std::list< ID > List; enum IDType { //{{{ NONE, DEVADDR, // Logical Bus:Device address DEVPORT, // Physical Bus-Port.Port.Port location SERIAL // Device serial number }; //}}} unsigned busnum; unsigned devnum; std::string devport; std::string serial; ID() : busnum( unsigned(-1) ) , devnum( unsigned(-1) ) {} ID( const std::string &id ) : busnum( unsigned(-1) ) , devnum( unsigned(-1) ) { parse_id_string( id ); } ID &operator=( const std::string &id ) { //{{{ busnum = unsigned(-1); devnum = unsigned(-1); devport.clear(); serial.clear(); parse_id_string( id ); return *this; } //}}} // Return true if this ID matches the given Device bool Matches( const Device::Handle &d ) const { //{{{ switch( Type() ) { case Device::ID::NONE: break; case Device::ID::DEVADDR: return devnum == d->GetDeviceNumber() && (busnum == unsigned(-1) || busnum == d->GetBusNumber()); case Device::ID::DEVPORT: return busnum == d->GetBusNumber() && devport == d->GetDevicePort(); case Device::ID::SERIAL: return serial == d->GetSerial(); } return false; } //}}} bool IsDevicePort() const { return ! devport.empty(); } bool IsDeviceAddress() const { return devnum != unsigned(-1); } bool IsSerialNumber() const { return ! serial.empty(); } IDType Type() const { //{{{ if( IsSerialNumber() ) return SERIAL; if( IsDeviceAddress() ) return DEVADDR; if( IsDevicePort() ) return DEVPORT; return NONE; } //}}} std::string Str() const { //{{{ switch( Type() ) { case NONE: return "No device selected"; case DEVADDR: if( busnum != unsigned(-1) ) return stringprintf( "Bus:Device %03u:%03u", busnum, devnum ); return stringprintf( "Bus:Device *:%03u", devnum ); case DEVPORT: return stringprintf( "Port %u-%s", busnum, devport.c_str() ); case SERIAL: return "Serial '" + serial + "'"; } return std::string(); } //}}} }; //}}} // We keep a minimal cache of the device configration, with only // the things that are actually interesting to us somewhere. // We do at least need to know wMaxPacketSize for the endpoints // that we use, and the endpoint address(es). struct Endpoint { //{{{ typedef std::vector< Endpoint > Vector; typedef std::set< uint8_t > AddressSet; // Returns: // LIBUSB_ENDPOINT_IN for a device -> host endpoint // LIBUSB_ENDPOINT_OUT for a host -> device endpoint static libusb_endpoint_direction Direction( uint8_t addr ) { //{{{ if( addr & 0x80 ) return LIBUSB_ENDPOINT_IN; return LIBUSB_ENDPOINT_OUT; } //}}} uint16_t wMaxPacketSize; uint8_t bEndpointAddress; Endpoint( const libusb_endpoint_descriptor &ep ) : wMaxPacketSize( ep.wMaxPacketSize ) , bEndpointAddress( ep.bEndpointAddress ) {} // Return the endpoint number. 
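            // (bEndpointAddress packs the direction into bit 7 and the endpoint
            // number into the low four bits, so for example address 0x81 is
            // endpoint 1 IN, and 0x02 is endpoint 2 OUT.)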
unsigned GetNumber() const { return bEndpointAddress & 0x0f; } libusb_endpoint_direction GetDirection() const { return Direction( bEndpointAddress ); } std::string Str() const { return stringprintf( "Endpoint %u %s, address 0x%02x, max packet %u", GetNumber(), GetDirection() == LIBUSB_ENDPOINT_IN ? " In" : "Out", bEndpointAddress, wMaxPacketSize ); } }; //}}} struct AltSetting { //{{{ typedef std::vector< AltSetting > Vector; // USB Endpoints are numbered from 1, so endpoint[0] here should be Endpoint 1. Endpoint::Vector endpoint; AltSetting( const libusb_interface_descriptor &alt ) { //{{{ for( size_t i = 0; i < alt.bNumEndpoints; ++i ) { #if 0 // The endpoint addresses aren't guaranteed to be sequential. // I have at least one device here with only Endpoint 1 and // Endpoint 3 (on separate interfaces). if( (alt.endpoint[i].bEndpointAddress & 0x0f) != i + 1 ) throw Error( _("Interface %u AltSetting %u: endpoint %zu has address 0x%02x"), alt.bInterfaceNumber, alt.bAlternateSetting, i + 1, alt.endpoint[i].bEndpointAddress ); #endif endpoint.push_back( Endpoint( alt.endpoint[i] ) ); } } //}}} void GetEndpointAddresses( Endpoint::AddressSet &a ) const { //{{{ for( Endpoint::Vector::const_iterator i = endpoint.begin(), e = endpoint.end(); i != e; ++i ) a.insert( i->bEndpointAddress ); } //}}} std::string Str() const { //{{{ std::string s; for( Endpoint::Vector::const_iterator i = endpoint.begin(), e = endpoint.end(); i != e; ++i ) s.append( " - " + i->Str() + "\n" ); return s; } //}}} }; //}}} struct Interface { //{{{ typedef std::vector< Interface > Vector; // USB AlternateSettings are numbered from 0, so alt[0] should be AlternateSetting 0. AltSetting::Vector alt; uint8_t bInterfaceNumber; Interface( const libusb_interface &iface ) : bInterfaceNumber( iface.num_altsetting > 0 ? iface.altsetting[0].bInterfaceNumber : uint8_t(-1) ) { //{{{ for( int i = 0; i < iface.num_altsetting; ++i ) { if( iface.altsetting[i].bAlternateSetting != i ) throw Error( _("Interface %u AltSetting %u: has alt value %u"), iface.altsetting[i].bInterfaceNumber, i, iface.altsetting[i].bAlternateSetting ); alt.push_back( AltSetting( iface.altsetting[i] ) ); } } //}}} void GetEndpointAddresses( Endpoint::AddressSet &a ) const { //{{{ // Ideally this should probably only recurse into the current // alt setting, but libusb doesn't appear to be able to get // that for us. It can set it, but not query it (portably). for( AltSetting::Vector::const_iterator i = alt.begin(), e = alt.end(); i != e; ++i ) i->GetEndpointAddresses( a ); } //}}} const AltSetting &GetAltSetting( uint8_t bAlternateSetting ) const { //{{{ if( __builtin_expect( alt.size() <= bAlternateSetting, 0 ) ) throw Error( _("Interface %u has no alt setting %u"), bInterfaceNumber, bAlternateSetting ); return alt[bAlternateSetting]; } //}}} std::string Str() const { //{{{ std::string s; for( size_t i = 0, e = alt.size(); i < e; ++i ) s.append( stringprintf( " - AltSetting %zu\n", i ) ) .append( alt[i].Str() ); return s; } //}}} }; //}}} struct Config { //{{{ typedef std::vector< Config > Vector; // Populate an array with the USB Configurations for a device. template< typename Array > static void Get( Array &configs, libusb_device *dev, const libusb_device_descriptor &desc ) { //{{{ // USB Configurations are numbered from 1, so configs[0] should be Configuration 1. 
configs.clear(); ScopedCancelState cancelstate; for( size_t i = 0; i < desc.bNumConfigurations; ++i ) { libusb_config_descriptor *c; int ret = libusb_get_config_descriptor( dev, uint8_t(i), &c ); if( ret ) throw USBError( ret, _("USBContext::Device::Config: " "failed to get configuration %zu descriptor"), i ); if( c->bConfigurationValue != i + 1 ) { uint8_t val = c->bConfigurationValue; libusb_free_config_descriptor( c ); throw Error( _("Configuration %zu: has configuration number %u"), i + 1, val ); } configs.push_back( Config( c ) ); libusb_free_config_descriptor( c ); } } //}}} // Dump the contents of an array of USB Configurations. template< typename Array > static std::string Dump( const Array &configs ) { //{{{ std::string s; for( size_t i = 0, e = configs.size(); i < e; ++i ) s.append( stringprintf( "Configuration %zu\n", i + 1 ) ) .append( configs[i].Str() ); return s; } //}}} // USB Interfaces are numbered from 0, so interface[0] should be Interface 0. Interface::Vector interface; uint8_t bConfigurationValue; Config( libusb_config_descriptor *c ) : bConfigurationValue( c->bConfigurationValue ) { //{{{ try { for( size_t i = 0; i < c->bNumInterfaces; ++i ) { if( c->interface[i].altsetting[0].bInterfaceNumber != i ) throw Error( _("Configuration %u Interface %zu: has interface number %u"), bConfigurationValue, i, c->interface[i].altsetting[0].bInterfaceNumber ); interface.push_back( Interface( c->interface[i] ) ); } } catch( const std::exception &e ) { throw Error( _("Configuration %u: %s"), bConfigurationValue, e.what() ); } } //}}} void GetEndpointAddresses( Endpoint::AddressSet &a ) const { //{{{ for( Interface::Vector::const_iterator i = interface.begin(), e = interface.end(); i != e; ++i ) i->GetEndpointAddresses( a ); } //}}} const Interface &GetInterface( uint8_t bInterfaceNumber ) const { //{{{ if( __builtin_expect( interface.size() <= bInterfaceNumber, 0 ) ) throw Error( _("Configuration %u has no interface %u"), bConfigurationValue, bInterfaceNumber ); return interface[bInterfaceNumber]; } //}}} std::string Str() const { //{{{ std::string s; for( size_t i = 0, e = interface.size(); i < e; ++i ) s.append( stringprintf( " - Interface %zu\n", i ) ) .append( interface[i].Str() ); return s; } //}}} }; //}}} // Scoped container for open device handles class Open : public RefCounted { //{{{ // Let Device get at the private constructor friend struct Device; private: typedef std::set< uint8_t > ClaimSet; typedef was_tr1::unordered_map< uint8_t, uint8_t > AltMap; Device::Handle m_device; libusb_device_handle *m_handle; ClaimSet m_claims; AltMap m_altmap; void do_open( libusb_device *dev ) { //{{{ ScopedCancelState cancelstate; int ret = libusb_open( dev, &m_handle ); if( ret < 0 ) throw USBError( ret, _("Device::Open failed") ); } //}}} void release_interface( uint8_t n ) { //{{{ int ret = libusb_release_interface( m_handle, n ); if( ret < 0 ) LogUSBError<2>( ret, _("Device::Open( %s ): failed to release interface %u"), m_device->IDStr().c_str(), n ); } //}}} void release_claims() { //{{{ for( ClaimSet::iterator i = m_claims.begin(), e = m_claims.end(); i != e; ++i ) release_interface( *i ); m_claims.clear(); m_altmap.clear(); } //}}} // This one is only for use in the Device constructor, where // we can't safely take a Handle to a partly constructed Device. 
Open( libusb_device *dev, const Device *d ) { //{{{ do_open( dev ); Log<3>( "+ Device::Open( %p %03u:%03u )\n", m_handle, d->m_busnum, d->m_devnum ); } //}}} public: typedef RefPtr< Open > Handle; Open( const Device::Handle &d ) : m_device( d ) { do_open( m_device->m_dev ); Log<3>( "+ Device::Open( %s )\n", m_device->IDStr().c_str() ); } ~Open() { //{{{ if( __builtin_expect( ! m_device, 0 ) ) Log<3>( "- Device::Open( %p )\n", m_handle ); else Log<3>( "- Device::Open( %s )\n", m_device->IDStr().c_str() ); ScopedCancelState cancelstate; release_claims(); libusb_close(m_handle); } //}}} // We cannot claim interfaces if the device is bound to another driver. void ForceDetach( uint8_t bInterfaceNumber ) { //{{{ ScopedCancelState cancelstate; int ret = libusb_detach_kernel_driver( m_handle, bInterfaceNumber ); if( ret ) throw USBError( ret, _("Device( %s ): failed to detach interface %u"), m_device->IDStr().c_str(), bInterfaceNumber ); Log<1>( "Detached interface %u of %s\n", bInterfaceNumber, m_device->IDStr().c_str() ); } //}}} // This may be called whether we've claimed any interfaces or not. //{{{ // If the device configuration cannot be restored, then the device // may be disconnected and reconnected, in which case this will then // throw a USBError with LIBUSB_ERROR_NOT_FOUND set, and this handle // will no longer be valid. If hotplug is enabled, the device should // be re-enumerated in that case. If not, you may need to rescan the // bus to find it again. //}}} void SoftReset() { //{{{ ScopedCancelState cancelstate; int ret = libusb_reset_device( m_handle ); if( ret ) throw USBError( ret, _("Device( %s ): SoftReset failed"), m_device->IDStr().c_str() ); Log<1>( "Reset %s\n", m_device->IDStr().c_str() ); } //}}} // This cannot be called on a device that is already claimed. void SetConfiguration( uint8_t bConfigurationValue ) { //{{{ ScopedCancelState cancelstate; int ret = libusb_set_configuration( m_handle, bConfigurationValue ); if( ret < 0 ) throw USBError( ret, _("Device( %s ): failed to set configuration %u"), m_device->IDStr().c_str(), bConfigurationValue ); } //}}} // Return the currently active bConfigurationValue. uint8_t GetConfiguration() { //{{{ ScopedCancelState cancelstate; int config; int ret = libusb_get_configuration( m_handle, &config ); if( ret ) throw USBError( ret, _("Device( %s ): failed to get current configuration"), m_device->IDStr().c_str() ); // Valid USB bConfigurationValue starts at 1. if( __builtin_expect( config < 1 || config > 255, 0 ) ) throw Error( _("Device( %s ): invalid current config (1 < %d < 256)"), m_device->IDStr().c_str(), config ); return uint8_t(config); } //}}} void ClaimInterface( uint8_t bInterfaceNumber ) { //{{{ ScopedCancelState cancelstate; int ret = libusb_claim_interface( m_handle, bInterfaceNumber ); if( ret < 0 ) throw USBError( ret, _("Device( %s ): failed to claim interface %u"), m_device->IDStr().c_str(), bInterfaceNumber ); m_claims.insert( bInterfaceNumber ); } //}}} void ClaimAllInterfaces() { //{{{ ScopedCancelState cancelstate; uint8_t bConfigurationValue = GetConfiguration(); const Device::Config &c = m_device->GetConfiguration( bConfigurationValue ); for( Interface::Vector::const_iterator i = c.interface.begin(), e = c.interface.end(); i != e; ++i ) { try { ClaimInterface( i->bInterfaceNumber ); } catch( ... 
) { release_claims(); throw; } } } //}}} void ReleaseInterface( uint8_t bInterfaceNumber ) { //{{{ ScopedCancelState cancelstate; release_interface( bInterfaceNumber ); m_claims.erase( bInterfaceNumber ); m_altmap.erase( bInterfaceNumber ); } //}}} void ReleaseAllInterfaces() { //{{{ ScopedCancelState cancelstate; release_claims(); } //}}} // We must hold the claim to the device interface to call this. void SetAltInterface( uint8_t bInterfaceNumber, uint8_t bAlternateSetting ) { //{{{ ScopedCancelState cancelstate; int ret = libusb_set_interface_alt_setting( m_handle, bInterfaceNumber, bAlternateSetting ); if( ret < 0 ) throw USBError( ret, _("Device( %s ): failed to set interface %u, alt %u"), m_device->IDStr().c_str(), bInterfaceNumber, bAlternateSetting ); m_altmap[bInterfaceNumber] = bAlternateSetting; } //}}} // If the endpoint_address isn't specified explicitly, try to clear // a stall from all endpoints of the currently claimed interface(s). // // We must hold the claim to the device interface to call this. void ClearHalt( unsigned endpoint_address = 0x100 ) { //{{{ ScopedCancelState cancelstate; if( endpoint_address == 0x100 ) { Endpoint::AddressSet a; uint8_t bConfigurationValue = GetConfiguration(); const Device::Config &c = m_device->GetConfiguration( bConfigurationValue ); for( ClaimSet::iterator i = m_claims.begin(), e = m_claims.end(); i != e; ++ i ) { uint8_t alt = 0; if( m_altmap.find( *i ) != m_altmap.end() ) alt = m_altmap[ *i ]; c.GetInterface( *i ).GetAltSetting( alt ).GetEndpointAddresses( a ); } for( Endpoint::AddressSet::iterator i = a.begin(), e = a.end(); i != e; ++i ) ClearHalt( *i ); return; } int ret = libusb_clear_halt( m_handle, uint8_t(endpoint_address) ); if( ret ) throw USBError( ret, _("Device( %s ): ClearHalt failed for endpoint %02x"), m_device->IDStr().c_str(), endpoint_address ); Log<1>( "Device( %s ): cleared halt on endpoint %02x\n", m_device->IDStr().c_str(), endpoint_address ); } //}}} operator libusb_device_handle*() { return m_handle; } }; //}}} private: //{{{ // This is semi-arbitrarily chosen, since the real limit is not just // OS dependent, but driver and controller dependent too. It's big // enough that we'll need to do something very different to what we // currently are before we ever hit it, and is smaller than most of // the modern OS and driver limits that I could find documented. // // The main practical constraint is that this should be a multiple // of wMaxPacketSize, but any sensible value already always will be. //}}} static const size_t DEFAULT_MAX_TRANSFER_SIZE = 1024 * 1024; libusb_device *m_dev; Config::Vector m_configs; size_t m_maxtransfer; unsigned m_vendorid; unsigned m_productid; unsigned m_busnum; unsigned m_devnum; std::string m_mfg; std::string m_product; std::string m_serial; std::string m_devport; std::string m_devpath; std::string get_string( const Open::Handle &dev, uint8_t idx ) { //{{{ if( idx == 0 ) return std::string(); ScopedCancelState cancelstate; unsigned char s[128]; unsigned retries = 0; try_again: int ret = libusb_get_string_descriptor_ascii( *dev, idx, s, sizeof(s) ); if( ret < 0 ) { if( ++retries <= 3 ) { switch( ret ) { case LIBUSB_ERROR_PIPE: // A control endpoint can't really stall, but it may still // return this if some other error occurs, so just try again. 
LogUSBError<1>( ret, _("USB Device( %03u:%03u ): " "failed to get string descriptor %u " "on attempt %u, retrying"), m_busnum, m_devnum, idx, retries ); goto try_again; case LIBUSB_ERROR_TIMEOUT: case LIBUSB_ERROR_OTHER: LogUSBError<1>( ret, _("USB Device( %03u:%03u ): " "failed to get string descriptor %u " "on attempt %u, resetting device"), m_busnum, m_devnum, idx, retries ); // We can't call dev->SoftReset here, because we're still in // the Device constructor, and it wants to access m_device, // so just do it the old fashioned way here. ret = libusb_reset_device( *dev ); if( ret ) throw USBError( ret, _("USB Device( %03u:%03u ): reset failed"), m_busnum, m_devnum ); goto try_again; default: break; } } LogUSBError<1>( ret, _("USB Device( %03u:%03u ): " "failed to get string descriptor %u"), m_busnum, m_devnum, idx ); return std::string(); } s[ret] = '\0'; return reinterpret_cast(s); } //}}} void get_device_config( const libusb_device_descriptor &desc ) { //{{{ ScopedCancelState cancelstate; bool fetch_strings = true; #if HAVE_LIBUSB_GET_PORT_NUMBERS uint8_t ports[8]; int ret = libusb_get_port_numbers( m_dev, ports, sizeof(ports) ); if( ret > 0 ) { m_devport = stringprintf("%d", ports[0]); for( int i = 1; i < ret; ++i ) m_devport += stringprintf(".%d", ports[i]); } else if( ret < 0 && ret != LIBUSB_ERROR_NOT_SUPPORTED ) throw USBError( ret, _("USB Device( %03u:%03u ): failed to get port numbers"), m_busnum, m_devnum ); #endif #if EM_PLATFORM_LINUX // Prior to Linux 3.3, usbfs had a somewhat arbitrary limit of 16kB //{{{ // on the size of a bulk URB. For transfers larger than that, libusb // would try to hack around that limit by splitting them into smaller // blocks and submitting multiple URBs together. For extra fun, they // try to grep a kernel version out of what uname(2) returns, and if // it looks like 2.6.32 or later, they unconditionally enable the use // of bulk continuation ... Which would be great, except that it is // utterly broken with USB3 XHCI controllers since they don't stop on // short packets regardless of whether USBDEVFS_URB_SHORT_NOT_OK is // set or not. // // The USBDEVFS_GET_CAPABILITIES ioctl was added in Linux 3.6, and // correctly announces that USBDEVFS_CAP_BULK_CONTINUATION is not // available for XHCI controllers. So unless we have that available // to query (and therefore libusb does as well), the only safe thing // we can do is limit transfers from our side to the 16kB limit. // // For bonus fun, libusb itself didn't support that until 1.0.13-rc1 // which fortunately was the same release where LIBUSBX_API_VERSION // was added, so since Debian Wheezy still has 1.0.11, and could be // running a backported kernel, check for that too. // // For extra bonus fun, RHEL/CentOS 6 backported that ioctl to their // 2.6.32 kernel, but didn't pull in the scatter-gather patch that // was committed to the mainline kernel along with it, or a recent // enough libusb to actually use it (they shipped with 1.0.9), so we // need to work around their mongrel kernel, to cater for the case of // users installing a later libusb(x) version on those systems. // Which is a thing people really do to use later software there. 
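        // The net effect of all that below: assume the conservative 16kB usbfs
        // limit, and only raise it to DEFAULT_MAX_TRANSFER_SIZE if the running
        // kernel positively advertises (via USBDEVFS_GET_CAPABILITIES) some
        // capability which makes larger bulk transfers safe for this device.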
//}}} m_maxtransfer = 16384; #if defined(USBDEVFS_GET_CAPABILITIES) && LIBUSB_SINCE(0x01000100) std::string usbfs_path = stringprintf("/dev/bus/usb/%03u/%03u", m_busnum, m_devnum ); int fd = open( usbfs_path.c_str(), O_RDWR ); if( fd < 0 ) { LogErr<1>( _("USBContext::Device failed to open %s"), usbfs_path.c_str() ); // If this fails, there's no point trying to read the string // descriptors because doing that needs to open the same device // in the same way too, at least in libusb up to 1.0.20 anyway. fetch_strings = false; } else { uint32_t devcaps; int ret = ioctl( fd, USBDEVFS_GET_CAPABILITIES, &devcaps ); if( ret < 0 ) { LogErr<1>( _("Device %03u:%03u failed to get capabilities"), m_busnum, m_devnum ); } else { Log<2>( _("Device %03u:%03u has capabilities 0x%02x\n"), m_busnum, m_devnum, devcaps ); #ifdef USBDEVFS_CAP_BULK_SCATTER_GATHER // Any of these capabilities should be enough to allow // safely relaxing the 16kB transfer limit on our side. if( devcaps & (USBDEVFS_CAP_BULK_CONTINUATION | USBDEVFS_CAP_NO_PACKET_SIZE_LIM | USBDEVFS_CAP_BULK_SCATTER_GATHER) ) #else // But RHEL 6 backported USBDEVFS_GET_CAPABILITIES // without pulling in the scatter-gather patch ... if( devcaps & (USBDEVFS_CAP_BULK_CONTINUATION | USBDEVFS_CAP_NO_PACKET_SIZE_LIM) ) #endif m_maxtransfer = DEFAULT_MAX_TRANSFER_SIZE; } close( fd ); } #endif #endif if( fetch_strings ) try { // We can't use OpenDevice here - we're currently still in the // Device constructor so if this Unref's it the world will end. Open::Handle h = new Open( m_dev, this ); m_mfg = get_string( h, desc.iManufacturer ); m_product = get_string( h, desc.iProduct ); m_serial = get_string( h, desc.iSerialNumber ); } BB_CATCH_ALL( 1, _("USBContext::Device failed to read string data") ) try { Config::Get( m_configs, m_dev, desc ); } catch( const std::exception &e ) { throw Error( _("Device %s: %s"), IDStr().c_str(), e.what() ); } } //}}} public: Device( libusb_device *dev ) : m_dev( dev ) , m_maxtransfer( DEFAULT_MAX_TRANSFER_SIZE ) , m_busnum( libusb_get_bus_number(m_dev) ) , m_devnum( libusb_get_device_address(m_dev) ) { //{{{ Log<2>( "+ Device( %03u:%03u )\n", m_busnum, m_devnum ); ScopedCancelState cancelstate; libusb_device_descriptor desc; int ret = libusb_get_device_descriptor( m_dev, &desc ); if( ret < 0 ) throw USBError( ret, _("Device( %03u:%03u ): failed to get descriptor"), m_busnum, m_devnum ); m_vendorid = desc.idVendor; m_productid = desc.idProduct; get_device_config( desc ); libusb_ref_device( m_dev ); } //}}} Device( libusb_device *dev, const libusb_device_descriptor &desc ) : m_dev( dev ) , m_maxtransfer( DEFAULT_MAX_TRANSFER_SIZE ) , m_vendorid( desc.idVendor ) , m_productid( desc.idProduct ) , m_busnum( libusb_get_bus_number(m_dev) ) , m_devnum( libusb_get_device_address(m_dev) ) { //{{{ Log<2>( "+ Device( %03u:%03u )\n", m_busnum, m_devnum ); ScopedCancelState cancelstate; get_device_config( desc ); libusb_ref_device( m_dev ); } //}}} ~Device() { //{{{ Log<2>( "- Device( %03u:%03u )\n", m_busnum, m_devnum ); ScopedCancelState cancelstate; libusb_unref_device( m_dev ); } //}}} bool operator==( const Device &d ) const { return m_busnum == d.m_busnum && m_devnum == d.m_devnum; } Open::Handle OpenDevice() { return new Open( this ); } // Device info accessors //{{{ void SetManufacturer( const std::string &id ) { m_mfg = id; } void SetProduct( const std::string &id ) { m_product = id; } void SetSerial( const std::string &id ) { m_serial = id; } void SetDevicePort( const std::string &str ) { m_devport = str; } void SetDevpath( const 
std::string &str ) { m_devpath = str; } unsigned GetVendorID() const { return m_vendorid; } unsigned GetProductID() const { return m_productid; } const std::string &GetManufacturer() const { return m_mfg; } const std::string &GetProduct() const { return m_product; } const std::string &GetSerial() const { return m_serial; } unsigned GetBusNumber() const { return m_busnum; } unsigned GetDeviceNumber() const { return m_devnum; } const std::string &GetDevicePort() const { return m_devport; } const std::string &GetDevpath() const { return m_devpath; } unsigned GetNumConfigurations() const { return unsigned(m_configs.size()); } const Config::Vector &GetConfigurations() const { return m_configs; } const Config &GetConfiguration( size_t n ) const { //{{{ for( Config::Vector::const_iterator i = m_configs.begin(), e = m_configs.end(); i != e; ++i ) { if( i->bConfigurationValue == n ) return *i; } throw Error( _("Device::GetConfiguration( %zu ) no such configuration for %s"), n, IDStr().c_str() ); } //}}} size_t GetMaxTransferSize() const { return m_maxtransfer; } std::string BusAddressStr() const { return stringprintf( "%03u:%03u", m_busnum, m_devnum ); } std::string DevicePortStr() const { return stringprintf( "%u-", m_busnum ) + m_devport; } std::string IDStr() const { //{{{ if( m_devport.empty() ) return stringprintf( "%03u:%03u Serial '%s'", m_busnum, m_devnum, m_serial.c_str() ); return stringprintf( "%03u:%03u Serial '%s', port %u-%s", m_busnum, m_devnum, m_serial.c_str(), m_busnum, m_devport.c_str() ); } //}}} std::string ProductStr() const { return stringprintf( "Serial '%s', Mfg '%s', Product '%s'", m_serial.c_str(), m_mfg.c_str(), m_product.c_str() ); } std::string VerboseStr() const { //{{{ // We might not always know which physical port the device is attached to. // We can get that from udev, or from libusb 1.0.16 or later (on some of // the platforms it supports), but we aren't guaranteed to have those. if( m_devport.empty() ) return stringprintf( "%03u:%03u %04x:%04x Serial '%s', Mfg '%s', Product '%s'", m_busnum, m_devnum, m_vendorid, m_productid, m_serial.c_str(), m_mfg.c_str(), m_product.c_str() ); return stringprintf( "%03u:%03u %04x:%04x Serial '%s', Mfg '%s', Product '%s', port %s", m_busnum, m_devnum, m_vendorid, m_productid, m_serial.c_str(), m_mfg.c_str(), m_product.c_str(), DevicePortStr().c_str() ); } //}}} // Return the device information as a string of nul separated fields. //{{{ // This is primarily intended for importing device data into shell scripts // in a way that is both safe and easy to parse. The string will contain // 9 fields, each terminated by a trailing nul. Some fields may be empty. // // The fields contain (in the order they are output): // // \nD: - Start of record magic. Used to sanity check that we have // the first field when multiple devices are output together, // and as a promise of the format and content of the following // data. If we ever need to break that promise, the magic will // change too to signal whatever the new promise may be. // // Bus number - A 3 digit, 0-padded, decimal number. The USB bus that the // device is on. // // Device number - A 3 digit, 0-padded, decimal number. The logical address // of the device on the given USB bus. // // Vendor ID - A 4 digit hexadecimal number. The device vendor's USB ID. // // Product ID - A 4 digit hexadecimal number. The device's USB product ID. // // Serial number - An arbitrary string containing the device serial data. // This may be empty. 
// // Manufacturer - An arbitrary string containing the manufacturer's name. // This may be empty. // // Product - An arbitrary string containing the product's name. // This may be empty. // // Device port - A period separated string of the physical port numbers that // are the device's physical address on the given USB bus. // This may be empty, since we aren't always able to obtain // this information of every platform. //}}} std::string ShellMrStr() const { //{{{ std::string s( "\nD:" ); s.append( 1, '\0' ); s.append( stringprintf( "%03u", m_busnum ) ).append( 1, '\0' ); s.append( stringprintf( "%03u", m_devnum ) ).append( 1, '\0' ); s.append( stringprintf( "%04x", m_vendorid ) ).append( 1, '\0' ); s.append( stringprintf( "%04x", m_productid ) ).append( 1, '\0' ); s.append( m_serial ).append( 1, '\0' ); s.append( m_mfg ).append( 1, '\0' ); s.append( m_product ).append( 1, '\0' ); s.append( m_devport ).append( 1, '\0' ); return s; } //}}} //}}} }; //}}} private: libusb_context *m_usb; mutable pthread_mutex_t m_device_mutex; Device::List m_devices; protected: // Derived classes can override these to get hotplug notification. virtual void DeviceAdded( const Device::Handle &d ) { (void)d; } virtual void DeviceRemoved( const Device::Handle &d ) { (void)d; } libusb_context *GetContext() const { return m_usb; } Device::Handle find_device( unsigned busnum, unsigned devnum ) { //{{{ ScopedCancelState cancelstate; Device::Handle h; libusb_device **devs; ssize_t ret = libusb_get_device_list( m_usb, &devs ); if( ret < 0 ) throw USBError( int(ret), _("USBContext: failed to enumerate devices") ); for( libusb_device **dev = devs; *dev; ++dev ) { libusb_device *d = *dev; if( libusb_get_bus_number(d) == busnum && libusb_get_device_address(d) == devnum ) { h = new Device( d ); break; } } libusb_free_device_list( devs, 1 ); return h; } //}}} void AddDevice( const Device::Handle &d ) { //{{{ ScopedMutex lock( &m_device_mutex ); for( Device::List::iterator i = m_devices.begin(), e = m_devices.end(); i != e; ++i ) { if( **i == *d ) { // This shouldn't usually happen, but the libusb hotplug support // can sometimes report a device twice if it loses the wrong race // and it wouldn't be impossible for some derived class to bork // this up and try to add a device more than once somehow. // // In the (very) worst case we'd only need to scan a few hundred // devices here, so this isn't a very expensive sanity check. 
Log<1>( _("USBContext::AddDevice: already have device %s\n"), d->VerboseStr().c_str() ); return; } } Log<2>( _("USBContext::AddDevice: %s %s\n"), d->VerboseStr().c_str(), d->GetDevpath().c_str() ); m_devices.push_back( d ); try { DeviceAdded( d ); } BB_CATCH_ALL( 0, "USBContext::AddDevice failed" ) } //}}} void RemoveDevice( libusb_device *dev ) { //{{{ ScopedMutex lock( &m_device_mutex ); unsigned busnum = libusb_get_bus_number( dev ); unsigned devnum = libusb_get_device_address( dev ); for( Device::List::iterator i = m_devices.begin(), e = m_devices.end(); i != e; ++i ) { if( (*i)->GetBusNumber() == busnum && (*i)->GetDeviceNumber() == devnum ) { Device::Handle d = *i; m_devices.erase( i ); Log<2>( _("USBContext::RemoveDevice: removed %s\n"), d->VerboseStr().c_str() ); try { DeviceRemoved( d ); } BB_CATCH_ALL( 0, "USBContext::RemoveDevice failed" ) return; } } Log<4>( _("USBContext::RemoveDevice: no matching device for %03u:%03u\n"), busnum, devnum ); } //}}} void RemoveDeviceByDevpath( const char *devpath ) { //{{{ ScopedMutex lock( &m_device_mutex ); for( Device::List::iterator i = m_devices.begin(), e = m_devices.end(); i != e; ++i ) { if( (*i)->GetDevpath() == devpath ) { Device::Handle d = *i; m_devices.erase( i ); Log<2>( _("USBContext::RemoveDeviceByDevpath( %s ): removed %s\n"), devpath, d->VerboseStr().c_str() ); try { DeviceRemoved( d ); } BB_CATCH_ALL( 0, "USBContext::RemoveDeviceByDevpath failed" ) return; } } Log<4>( _("USBContext::RemoveDeviceByDevpath( %s ): no matching device\n"), devpath ); } //}}} void WarmplugAllDevices() { //{{{ ScopedMutex lock( &m_device_mutex ); for( Device::List::const_iterator i = m_devices.begin(), e = m_devices.end(); i != e; ++i ) { try { DeviceAdded( *i ); } BB_CATCH_ALL( 0, "USBContext: warmplug failed" ) } } //}}} public: USBContext() { //{{{ Log<2>( "+ USBContext\n" ); ScopedCancelState cancelstate; int ret = libusb_init( &m_usb ); if( ret ) throw USBError( ret, _("USBContext: failed to create libusb context") ); pthread_mutex_init( &m_device_mutex, NULL ); } //}}} virtual ~USBContext() { //{{{ Log<2>( "- USBContext\n" ); // Release all the device references before destroying the context. // The libusb_device doesn't keep a reference count for the context // but it does have a pointer to it which something might access. { ScopedMutex lock( &m_device_mutex ); m_devices.clear(); } pthread_mutex_destroy( &m_device_mutex ); // Extra debug log checkpoints here, because on FreeBSD 11, the // call to libusb_exit can take 4 seconds to complete, which can // otherwise look like our code has become hung up somewhere. Log<4>( "USBContext: waiting for libusb_exit\n" ); ScopedCancelState cancelstate; libusb_exit( m_usb ); Log<4>( "USBContext: libusb_exit completed\n" ); } //}}} // Return true if this build, on this platform, has device hotplug support. virtual bool HasHotplugSupport() const { return false; } // If vendorid and productid are 0, enumerate all devices. // Otherwise only those matching the given VID:PID. // If append is true, add them to any existing list of devices, otherwise replace them. void EnumerateDevices( unsigned vendorid = 0, unsigned productid = 0, bool append = false ) { //{{{ ScopedMutex lock( &m_device_mutex ); ScopedCancelState cancelstate; libusb_device **devs; ssize_t ret = libusb_get_device_list( m_usb, &devs ); if( ret < 0 ) throw USBError( int(ret), _("USBContext: failed to enumerate devices") ); if( ! 
append ) m_devices.clear(); for( libusb_device **dev = devs; *dev; ++dev ) { libusb_device *d = *dev; libusb_device_descriptor desc; int r = libusb_get_device_descriptor(d, &desc); if( r < 0 ) { LogUSBError<1>( r, _("USBContext::EnumerateDevices: failed to get descriptor") ); continue; } if( (vendorid == 0 && productid == 0) || (desc.idVendor == vendorid && desc.idProduct == productid) ) m_devices.push_back( new Device(d, desc) ); else Log<4>( "USBContext: ignoring %04x:%04x\n", desc.idVendor, desc.idProduct ); } libusb_free_device_list( devs, 1 ); } //}}} // Replace the current list of enumerated devices with only those matching id. void EnumerateDevices( const ProductID &id ) { EnumerateDevices( id.vid, id.pid, false ); } // Append to the current list of enumerated devices any matching id. void AppendDevices( const ProductID &id ) { EnumerateDevices( id.vid, id.pid, true ); } unsigned GetNumDevices() const { //{{{ ScopedMutex lock( &m_device_mutex ); return unsigned(m_devices.size()); } //}}} // Get a device by logical or physical address, or by serial number. Device::Handle GetDevice( const Device::ID &id ) const { //{{{ ScopedMutex lock( &m_device_mutex ); for( Device::List::const_iterator i = m_devices.begin(), e = m_devices.end(); i != e; ++i ) if( id.Matches( *i ) ) return *i; throw Error( _("USBContext::GetDevice( %s ): no such device"), id.Str().c_str() ); } //}}} // Return a list of all currently available devices. //{{{ // Most things shouldn't ever (need to) use this, unless they really do // want an 'unchanging' snapshot of the current state which won't have // devices added (or removed) if hotplug events occur. //}}} Device::List GetDevices() const { //{{{ ScopedMutex lock( &m_device_mutex ); return m_devices; } //}}} // List all available devices in a human readable form void ListDevices() const { //{{{ ScopedMutex lock( &m_device_mutex ); if( m_devices.empty() ) { printf( _("No devices found.\n") ); return; } size_t n = m_devices.size(); printf( P_("Have %zu device:\n", "Have %zu devices:\n", n), n ); if( n > 0 ) { if( opt_verbose ) printf( " Bus:Dev VID:PID\n" ); else printf( " Bus:Device\n" ); } for( Device::List::const_iterator i = m_devices.begin(), e = m_devices.end(); i != e; ++i ) { if( opt_verbose ) printf( " %s\n", (*i)->VerboseStr().c_str() ); else printf( " %s\n", (*i)->IDStr().c_str() ); } } //}}} // List all available devices in a machine readable form // that is suitable for importing into shell scripts. void ListDevicesShellMR() const { //{{{ std::string s; for( Device::List::const_iterator i = m_devices.begin(), e = m_devices.end(); i != e; ++i ) s.append( (*i)->ShellMrStr() ); if( ! s.empty() ) if( write( STDOUT_FILENO, s.data(), s.size() ) ) { /* Suppress the unused return warning here */ } } //}}} template< int N > BB_PRINTF_FORMAT(2,3) static void LogUSBError( int err, const char *format, ... ) { //{{{ va_list arglist; std::string msg; va_start( arglist, format ); msg = vstringprintf( format, arglist ); Log( "%s: %s\n", msg.c_str(), libusb_strerror(libusb_error(err)) ); va_end( arglist ); } //}}} }; //}}} } // BitB namespace #endif // _BB_USBCONTEXT_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/include/bit-babbler/users.h0000644000000000000000000000424214136173163015526 0ustar // This file is distributed as part of the bit-babbler package. 
// Copyright 2004 - 2021, Ron #ifndef _BB_USERS_H #define _BB_USERS_H #if EM_PLATFORM_POSIX #include #include #include namespace BitB { // There is no particular reason for this to be static inline, aside from // the fact that it is only ever be used in just one place, and probably // won't be used anywhere else for now, so bundling it off into a separate // impl file is a touch on the overkill side. If we later package all of // this up into a convenience library for applications to use that is most // probably what we should do with it though. static inline gid_t GetGID( const std::string &group ) { //{{{ if( group.empty() ) return gid_t(-1); long bufsize = sysconf(_SC_GETGR_R_SIZE_MAX); char *buf; struct group grent; struct group *have_result; if( bufsize <= 0 ) bufsize = 65536; try_again: buf = new char[bufsize]; switch( getgrnam_r( group.c_str(), &grent, buf, size_t(bufsize), &have_result ) ) { case 0: case ENOENT: break; case ERANGE: // Draw the line at some arbitrarily insane number if( bufsize < 4 * 1024 * 1024 ) { delete [] buf; bufsize <<= 1; goto try_again; } BB_FALLTHROUGH; // fall through default: delete [] buf; throw SystemError( "GetGID: failed to get group data for %s", group.c_str() ); } if( have_result == NULL ) { delete [] buf; throw Error( "GetGID: failed to get group data for %s", group.c_str() ); } gid_t gid = grent.gr_gid; delete [] buf; return gid; } //}}} } // BitB namespace #endif // EM_PLATFORM_POSIX #endif // _BB_USERS_H // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/libvirt/0002755000000000000000000000000014136173163012077 5ustar bit-babbler-0.9/libvirt/bbvirt0000755000000000000000000004210514136173163013315 0ustar #!/bin/bash # This file is distributed as part of the bit-babbler package. # Copyright 2015 - 2021, Ron # Default configuration if not explicitly specified. config_dir="/etc/bit-babbler" config_file="$config_dir/vm.conf" verbose=0 # Default to searching the PATH for tools we use. seedd="seedd" virsh="virsh" logger_opts=( "--id=$$" '-t' "${0##*/}" ) log_to_syslog= log_to_stderr=1 log() { [ -z "$log_to_syslog" ] || logger "${logger_opts[@]}" -- "$*" [ -z "$log_to_stderr" ] || echo "$*" 1>&2 } die() { log "$0: $*" exit 1 } verb() { local n=$1 shift (( verbose < n )) || log "$*" } usage() { cat 1>&2 < [options] bbvirt attach-all|detach-all [] [options] Helper script to hotplug BitBabbler devices into (and back out of) libvirt managed virtual machines, either manually, or triggered by udev events. The first form above is used to attach or detach a single device from a VM, and is suitable for use as a hotplug trigger, such as a udev rule. The second form is offered as a convenience for the task of attaching or detaching all devices assigned to a particular VM domain (or all of the configured domains if no explicit domain is passed). A may be specified by its serial number, its logical address on the USB bus in the form BUSNUM:DEVNUM, or its physical address on the USB bus in the form BUS-PORT[.PORT ...]. A is the libvirt VM domain name. The following options may also be used: -C, --config "file" Use the specified configuration file for the default assignment of devices to VM domains. -c, --connect "URI" Override the default virsh URI (or the DOMAIN_URI that is specified in the configuration file). -D, --domain "name" Act on the given domain, overriding any assignment of the device in the configuration file. -b, --busnum "num" Explicitly specify the USB bus number that the device is attached to, rather than searching for it. 
-d, --devnum "num" Explicitly specify the logical USB device number on the USB bus, rather than searching for it. --syslog Log messages to syslog as well as stderr. -n, --dry-run Don't attach or detach any devices, just show what would happen if this was a live run. -v, --verbose Be more noisy about what is happening. -?, --help Shows this usage summary. EOF exit "$1" } parse_args() { while (( $# )); do case $1 in attach|detach) action=$1; shift; device=$1 ;; attach-all|detach-all) action=${1%-all} do_all=1 if [[ $2 != -* ]]; then shift domain=$1 fi ;; --busnum=*) printf -v busnum "%03d" "$(( 10#${1#*=} ))" ;; --busnum|-b) shift; printf -v busnum "%03d" "$(( 10#$1 ))" ;; --devnum=*) printf -v devnum "%03d" "$(( 10#${1#*=} ))" ;; --devnum|-d) shift; printf -v devnum "%03d" "$(( 10#$1 ))" ;; --connect=*) uri=${1#*=} ;; --connect|-c) shift; uri=$1 ;; --domain=*) domain=${1#*=} ;; --domain|-D) shift; domain=$1 ;; --config=*) config_file=${1#*=} ;; --config|-C) shift; config_file=$1 ;; --syslog) log_to_syslog=1 ;; --dry-run|-n) dry_run=1 ;& # --dry-run implies --verbose --verbose|-v) (( ++verbose )) ;; -vv) (( verbose += 2 ));; -vvv) (( verbose += 3 ));; -vvvv) (( verbose += 4 ));; --help|-\?) show_help=1 ;; *) die "ERROR: unrecognised option '$1', try --help" ;; esac shift done } parse_args "$@" [ -z "$show_help" ] || usage 0 # Import the configuration of which devices belong to which VM domains. import_domain_config() { [[ $config_file == */* ]] || config_file="$config_dir/$(basename -- "$config_file" .conf).conf" if [ -f "$config_file" ] && [ -r "$config_file" ]; then . "$config_file" else verb 1 "Unable to read config file '$config_file'" exit 0 fi } # Test if a string is valid to use in a constructed variable name. # We need to explicitly check this to avoid having "undefined" but wrong things # happen if we dereference an invalid indirect variable name. A "name" in bash # is defined as: # # 'A word consisting only of alphanumeric characters and underscores, and # beginning with an alphabetic character or an under‐score.' # # With an implicit assumption that all those characters are also only ASCII. # We don't need to validate that the first character isn't a digit here, because # we know we will always be appending this to a valid prefix string before use. # We do want to validate that it's not an empty string though. is_valid_as_variable_name() { # If we could be sure this would only run with bash 4.3 or later, then # we could use 'shopt -s globasciiranges' and drop the [:ascii:] test, # but Wheezy still has bash 4.2 - alternatively we could force use of the # C locale here to avoid having non-ascii characters collated into the # range a-z, but not being locale agnostic is ugly, so just test against # the :ascii: character class explicitly. [[ -n $1 && $1 != *[^a-zA-Z0-9_]* && $1 != *[^[:ascii:]]* ]] } # Build indices mapping config domain IDs (which must include only characters # which are valid in variable names) to/from libvirt domain names (which don't # restrict the allowed character set anymore). Given either ID as input, this # lets us determine both the config ID and libvirt domain name when needed. # They'll only differ when the DOMAIN_NAME_* override is used to explicitly # specify the libvirt domain name. map_domain_names() { import_domain_config declare -gA libvirt_domains declare -gA config_domains local n k verb 4 "Mapping domain identifiers:" # First assume every config ID corresponds to a libvirt guest domain name. 
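    # As a purely illustrative example of where the loops below get us:
    # if vm.conf contained
    #
    #   DOMAIN_RNG_debianvm=( ABCDEF1 )
    #   DOMAIN_NAME_debianvm="Debian VM"
    #
    # then we'd end up with libvirt_domains[debianvm]="Debian VM" and
    # config_domains["Debian VM"]=debianvm, as well as the identity mapping
    # config_domains[debianvm]=debianvm from this first pass.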
for n in "${!DOMAIN_RNG_@}"; do n=${n#DOMAIN_RNG_} libvirt_domains[$n]=$n config_domains[$n]=$n done # It's not very likely that someone might have a DOMAIN_URI defined without # a corresponding DOMAIN_RNG, so this is normally redundant, but in theory # it is possible for someone to want to manually add a device to a domain # which currently has its DOMAIN_RNG commented out, but still want to use # the URI from the config instead of specifying that manually too. for n in "${!DOMAIN_URI_@}"; do n=${n#DOMAIN_URI_} libvirt_domains[$n]=$n config_domains[$n]=$n done # Then override libvirt_domains for each ID with an explicit DOMAIN_NAME, # and add a config_domains reverse mapping for the real libvirt guest name. for n in "${!DOMAIN_NAME_@}"; do k=${!n} n=${n#DOMAIN_NAME_} libvirt_domains[$n]=$k config_domains[$k]=$n done if (( verbose > 3 )); then local s=" " for n in "${!libvirt_domains[@]}"; do log " config 'DOMAIN_*_$n' ${s:0:15-${#n}}-> libvirt domain ${libvirt_domains[$n]}" done for n in "${!config_domains[@]}"; do log " domain name '$n' ${s:0:19-${#n}}-> config DOMAIN_*_${config_domains[$n]}" done fi } # Device array indices DA_STRIDE=9 DA_BUSNUM=1 DA_DEVNUM=2 DA_SERIAL=5 DA_PORTNUM=8 DA_MAGIC=$'\nD:' # Import details of available devices in the shell machine readable format. get_available_devices() { all_devices=() # Clear IFS, the leading \n is part of the magic and we don't want it stripped. while IFS= read -r -d '' f; do all_devices+=("$f") done < <( "$seedd" --shell-mr ) if (( verbose > 3 )); then printf "seedd reported devices:" printf " '%s'" "${all_devices[@]}" printf "\n" fi } # Find a device matching some combination of attributes. # get_device_by index match [index match ...] get_device_by() { local i j compare=( "$@" ) selected_device=() for (( i = 0; i < ${#all_devices[@]}; i += DA_STRIDE )); do # Assert the array is framed with the expected stride and magic. [ "${all_devices[$i]}" = "$DA_MAGIC" ] || die "Invalid device array magic at element $i '${all_devices[$i]}'" # Try the next device if this one doesn't match all attributes for (( j = 0; j < $#; j += 2 )); do index=${compare[$j]} match=${compare[(($j + 1))]} [ "${all_devices[(($i + $index))]}" = "$match" ] || continue 2 done # Slice all details of the first matching device. selected_device=( "${all_devices[@]:$i:$DA_STRIDE}" ) break; done } # Do $action for each available device assigned to domain $1 (with $propagate_opts) act_on_all_devices_in_domain() { local config_domain=${config_domains[$1]} local devs="DOMAIN_RNG_${config_domain}[@]" local dev verb 3 "" verb 3 "${action^}ing all devices for domain '$1' (config '$config_domain')" if [ -z "$config_domain" ]; then verb 1 "Domain '$1' has no devices assigned in '$config_file'." return fi for dev in "${!devs}"; do verb 4 "Checking for device '$dev'" get_device_by "$DA_SERIAL" "$dev" if (( ${#selected_device[@]} == DA_STRIDE )); then exec_opts=( "$action" "$dev" -D "$config_domain" ) exec_opts+=( -b "${selected_device[$DA_BUSNUM]}" ) exec_opts+=( -d "${selected_device[$DA_DEVNUM]}" ) exec_opts+=( "${propagate_opts[@]}" ) verb 2 "$0 ${exec_opts[*]}" "$0" "${exec_opts[@]}" else verb 2 "Failed to find device '$dev'." fi done } # For attach-all or detach-all, we synthesise a series of attach/detach calls # with all the necessary options for each device in the requested domain(s). 
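# For example (with a hypothetical serial number and device address), running
# 'bbvirt attach-all debianvm' will re-invoke this script once for each device
# assigned to that domain which is currently plugged in, roughly as:
#
#   bbvirt attach ABCDEF1 -D debianvm -b 001 -d 007
#
# with the --config file and any --connect, --dry-run and verbosity options
# propagated along as well.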
if [ -n "$do_all" ]; then map_domain_names get_available_devices propagate_opts=( ${config_file:+ -C "$config_file"} ) propagate_opts+=( ${uri:+ -c "$uri"} ) propagate_opts+=( ${dry_run:+ -n} ) # Propagate verbose flags up to -vvvv (the maximum level we actually use), # accounting for the fact that dry_run bumps the verbosity level too. v='' for (( i = ${dry_run:-0}; i < verbose; ++i )); do v+='v' done propagate_opts+=( ${v:+ "-${v:0:4}"} ) if [ -n "$domain" ]; then # Act on all the devices configured for the given domain act_on_all_devices_in_domain "$domain" else # Act on all the devices configured for all domains for dom in "${!DOMAIN_RNG_@}"; do act_on_all_devices_in_domain "${dom#DOMAIN_RNG_}" done fi exit 0 fi # We need at least these two to do anything at all below here. [ -n "$action" ] || die "No action specified." [ -n "$device" ] || die "No device specified." check_or_set_busnum() { if [ -z "$busnum" ]; then busnum=$1 elif [ "$busnum" != "$1" ]; then die "Device bus $1 != --busnum $busnum." fi } check_or_set_devnum() { if [ -z "$devnum" ]; then devnum=$1 elif [ "$devnum" != "$1" ]; then die "Device number $1 != --devnum $devnum." fi } # Figure out which device we've been asked to act on. if [[ $device =~ ^[[:digit:]]{1,3}:[[:digit:]]{1,3}$ ]]; then # We were passed a device logical address in the form BUSNUM:DEVNUM printf -v bnum "%03d" "$(( 10#${device%:*} ))" printf -v dnum "%03d" "$(( 10#${device#*:} ))" check_or_set_busnum "$bnum" check_or_set_devnum "$dnum" get_available_devices get_device_by "$DA_BUSNUM" "$busnum" "$DA_DEVNUM" "$devnum" (( ${#selected_device[@]} == DA_STRIDE )) || die "Failed to find device '$device'" devserial=${selected_device[$DA_SERIAL]} verb 1 "Device at logical address $bnum:$dnum has serial '$devserial'." elif [[ $device =~ ^[[:digit:]]+-[[:digit:].]+$ ]]; then # We were passed a device physical address in the form BUS-PORT[.PORT ...] printf -v bnum "%03d" "$(( 10#${device%-*} ))" pnum=${device#*-} check_or_set_busnum "$bnum" get_available_devices get_device_by "$DA_BUSNUM" "$busnum" "$DA_PORTNUM" "$pnum" (( ${#selected_device[@]} == DA_STRIDE )) || die "Failed to find device '$device'" devserial=${selected_device[$DA_SERIAL]} check_or_set_devnum "${selected_device[$DA_DEVNUM]}" verb 1 "Device at physical address $((10#$bnum))-$pnum has serial '$devserial'." elif [[ $device =~ ^[A-Z0-9]{6,7}$ ]]; then # If it wasn't either of the above, assume this may be a serial number. devserial=$device else die "Invalid device identifier '$device'" fi # Build an index mapping device serial numbers to (config) domain names. map_devices_to_domains() { import_domain_config declare -gA domains local dom dev devs verb 4 "Mapping device serial numbers to domain identifiers:" for dom in "${!DOMAIN_RNG_@}"; do verb 4 " config: $dom" devs="${dom}[@]" for dev in "${!devs}"; do verb 4 " dev: $dev" domains[$dev]=${dom#DOMAIN_RNG_} done done if (( verbose > 2 )); then for dev in "${!domains[@]}"; do log " device $dev is in domain ${domains[$dev]}" done fi } # If the VM domain wasn't explicitly specified, try to find it from the # configured device allocations. It's not an error for it not to be, # the udev rule will run this for all devices, even those that we aren't # passing through to a VM. if [ -z "$domain" ]; then map_devices_to_domains domain=${domains[$devserial]} if [ -z "$domain" ]; then verb 1 "Device '$devserial' is not assigned to any domain." 
exit 0 fi # We know the serial number lookup will return the config ID, so we can get # the libvirt domain by just checking if DOMAIN_NAME_* was set for it too. name_config="DOMAIN_NAME_$domain" libvirt_domain=${!name_config:-$domain} config_domain=$domain else # Find the config ID and libvirt domain name to use. If we don't have any # mapping for the given $domain, then just use that name verbatim, since we # could be here because someone is manually attaching or detaching a device # to a libvirt domain which isn't included in the config file definitions, # and that is an ok thing to be doing if complete automation isn't needed. map_domain_names libvirt_domain=${libvirt_domains[$domain]:-$domain} config_domain=${config_domains[$domain]} # Check if it's safe to fall back to assuming $domain for this one. [ -n "$config_domain" ] || ! is_valid_as_variable_name "$domain" || config_domain="$domain" fi verb 4 "Domain '$domain' => config '$config_domain', libvirt domain '$libvirt_domain'" # Check if we need to pass an explicit --connect URI to virsh. # The $config_domain should already be validated, so we could just check if # it is not empty here, but it doesn't hurt to apply the full test here too. if [ -z "$uri" ] && is_valid_as_variable_name "$config_domain"; then uri_config="DOMAIN_URI_$config_domain" uri=${!uri_config} fi # Check that we were passed, or have determined, the logical address of the # device, since that is the only way that we can pass it to virsh at present. if [ -z "$busnum" ] || [ -z "$devnum" ]; then get_available_devices get_device_by "$DA_SERIAL" "$devserial" check_or_set_busnum "${selected_device[$DA_BUSNUM]}" check_or_set_devnum "${selected_device[$DA_DEVNUM]}" if [ -z "$busnum" ] || [ -z "$devnum" ]; then die "Could not get bus or device number for '$device'" fi fi # Create the foul format that virsh requires us to use for this. device_xml() { cat <
<EOL
<hostdev mode='subsystem' type='usb'>
  <source>
    <address bus='$((10#$1))' device='$((10#$2))'/>
  </source>
</hostdev>
EOL } opts=( ${uri:+ -c "$uri"} "$action-device" "$libvirt_domain" ) # Tell them what we are going to do. verb 1 "$virsh ${opts[*]} --live" verb 2 "$(device_xml "$busnum" "$devnum")" # Do it (maybe). [ -n "$dry_run" ] || "$virsh" "${opts[@]}" <(device_xml "$busnum" "$devnum") --live # Either way, don't fail at doing it. This could be called by udev when a # device is hotplugged, and we don't really want it to bitch at people just # because the domain isn't actually running right now. It's not necessarily # an error for running this to be a no-op. # # We could try to do some other checks to see if the VM is running first, but # it's hard to avoid a race where it might start or stop between checking that # and acting on it, so we just try it and either it will work or it won't. exit 0 # vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/libvirt/qemu-hook0000755000000000000000000001001114136173163013721 0ustar #!/bin/bash # This file is distributed as part of the bit-babbler package. # Copyright 2015 - 2018, Ron # # Example libvirt QEMU hook for cold-plugging BitBabbler devices into # newly started virtual machines. To use this, it must be installed # as /etc/libvirt/hooks/qemu (or wherever the equivalent configuration # is found on your system), and then libvirtd must be restarted if you # did not previously have a 'qemu' hook installed there. It does not # need to be restarted again if you modify an existing script there. # # This script assumes that you have the udev rules from the bit-babbler # package installed and active, and that you have bbvirt(1) configured # to assign devices to the guest domains you want them available in. # # It will use the device assignments from /etc/bit-babbler/vm.conf to # trigger cold plug events for each device that should be made available # in the guest VM that is being started (which will in turn signal the # bbvirt helper script to actually attach them to that guest). . /etc/bit-babbler/vm.conf guest_name=$1 operation=$2 # The path where udevadm is found was Helpfully changed by systemd from the # original location in /sbin to now be in /bin. Since we can't be certain # what will be in the PATH when this is called, and since we still need to # support systems prior to that change, that means we need to search for it # ourselves. If we can't find it in any of the expected places, then fall # back to assuming the user's system really does have it in their PATH. # If that's not true, this will fail out soon enough when we try to use it. UDEVADM="udevadm" for f in /bin/udevadm /sbin/udevadm; do if [ -x "$f" ]; then UDEVADM=$f break; fi done # Test if a string is valid to use in a constructed variable name. # We need to explicitly check this to avoid having "undefined" but wrong things # happen if we dereference an invalid indirect variable name. A "name" in bash # is defined as: # # 'A word consisting only of alphanumeric characters and underscores, and # beginning with an alphabetic character or an under‐score.' # # With an implicit assumption that all those characters are also only ASCII. # We don't need to validate that the first character isn't a digit here, because # we know we will always be appending this to a valid prefix string before use. # We do want to validate that it's not an empty string though. 
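# For example (with a hypothetical guest name): a guest which libvirt knows
# as 'debian-sid' can't be used directly in a constructed variable name
# because of the '-', so the config needs to map it to a usable identifier:
#
#   DOMAIN_NAME_sid="debian-sid"
#   DOMAIN_RNG_sid=( KWIF4Q )
#
# and get_config_for_guest() below resolves the real guest name back to the
# 'sid' config ID before the device list for it is looked up.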
is_valid_as_variable_name() { # If we could be sure this would only run with bash 4.3 or later, then # we could use 'shopt -s globasciiranges' and drop the [:ascii:] test, # but Wheezy still has bash 4.2 - alternatively we could force use of the # C locale here to avoid having non-ascii characters collated into the # range a-z, but not being locale agnostic is ugly, so just test against # the :ascii: character class explicitly. [[ -n $1 && $1 != *[^a-zA-Z0-9_]* && $1 != *[^[:ascii:]]* ]] } # Find the shell-friendly "config name" for the given libvirt domain name. # If the guest name contains unicode characters, or anything else which would # make it illegal to use as part of a bash variable name (like a '-'), then # it needs to be explicitly mapped to a valid identifier with a DOMAIN_NAME_* # declaration in the config file. get_config_for_guest() { for n in "${!DOMAIN_NAME_@}"; do if [ "${!n}" = "$1" ]; then config_name=${n#DOMAIN_NAME_} return fi done if is_valid_as_variable_name "$1"; then config_name=$1 else #echo "Invalid config name '$1'" config_name='' fi } if [ "$operation" = "started" ]; then get_config_for_guest "$guest_name" if [ -n "$config_name" ]; then devices="DOMAIN_RNG_${config_name}[@]" opts=( -c change -s usb -a "idVendor=0403" -a "idProduct=7840" ) for d in "${!devices}"; do "$UDEVADM" trigger "${opts[@]}" -a "serial=$d" done fi fi # Always return success here, we don't want to abort guest operations. exit 0 # vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/libvirt/vm.conf0000644000000000000000000000347414136173163013376 0ustar # System configuration file for bbvirt. # This file is sourced as a bash shell script, see bbvirt(1) for more details. # BitBabbler devices are assigned to libvirt managed virtual machines here # through the use of a pair of variables for each VM domain that devices are # to be passed through to. Any number of devices and libvirt domains can be # configured here. # # DOMAIN_NAME_="" # - Provides a mapping from to the real libvirt domain name when that # name contains unicode, or other special characters which aren't legal to # use in a shell variable name (i.e. anything except ascii A-Z, a-z, 0-9 or # and underscore). If this is not specified, then the default is to assume # that is the literal libvirt domain name which the devices are to be # assigned to by DOMAIN_RNG_. # # DOMAIN_URI_="" # - Is the optional --connect URI passed to virsh(1) for domain . # If not specified, then the system default for virsh (for the user that # is running bbvirt) will be used. # # DOMAIN_RNG_=( ) # - Is a bash array containing the list of device serial numbers which are # to be passed through to domain . It is not an error for devices # which are not currently plugged in to be listed here. If the udev rule # to invoke bbvirt is active, then these devices will all be passed through # to that VM when they are plugged in. You just need to ensure that each # device listed is only allocated to one VM domain, and that they will not # be used by a seedd(1) instance running on the host. 
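# If you need to find the serial number of a device to list here, it is the
# iSerial string from its USB descriptor, so (assuming the usbutils lsusb
# tool is available) something like:
#
#   lsusb -v -d 0403:7840 | grep iSerial
#
# should show the serial of each BitBabbler device currently plugged in.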
# For example: #DOMAIN_URI_sid="qemu:///system" #DOMAIN_RNG_sid=( KWIF4Q JAXJE6 ) #DOMAIN_NAME_kbsd="kbsd-unstable" #DOMAIN_URI_kbsd="qemu+ssh://your.domain/system" #DOMAIN_RNG_kbsd=( G2SJ3Z ) # vi:sts=4:sw=4:et:syntax=sh:filetype=sh bit-babbler-0.9/munin/0002755000000000000000000000000014136173163011552 5ustar bit-babbler-0.9/munin/bit-babbler0000644000000000000000000000630514136173163013644 0ustar # Munin-node configuration for BitBabbler # # The init script in the Debian package defaults to using --socket-group=adm # which means this does not need to run as root. If you prefer to use an # alternative group for that permission, you should use the same group here # as well. If you disable group access to the control socket, this will need # to run as 'user root'. # # The following extra options are configurable: # # env.control_socket - The filesystem path to the control socket to query. # You shouldn't normally need to set this, unless you # have multiple processes controlling the available # BitBabbler devices and want to monitor them all. # # env.persist_devices - If set to 'yes', then all devices which have been # present for at least one run of the script on this # node will continue to be reported as present, even if # they have been disabled or removed. This ensures that # all historical logs for them will continue to be made # available, and that alerts will be reported for them # if they are unexpectedly removed. If not set, or set # to any other value, the history will not be lost, but # the graphs for them may cease to be displayed, and # alerts will not be triggered for them. # # The list of actively persisted devices is recorded in # the plugin state files for bit_babbler-* which can be # found in /var/lib/munin-node/plugin-state/ (or where # your system was configured to put those). You can edit # that file if you do wish to permanently remove devices # from it. # # env.always_include - This may be set to a space separated list of device IDs # (as reported by 'bbctl -s') that you always want to be # graphed. Its operation is similar to persist_devices, # except the list of devices is specified explicitly here # rather than detected automatically. # # Any devices which are present, but which are not listed # here, will still be graphed while they are present, and # will still be automatically included in the persistent # list if persist_devices is enabled. Unless they are # explicitly listed in always_ignore. # # env.always_ignore - This may be set to a space separated list of device IDs # as above, except devices in this list will never be # reported to the munin master, and will be removed from # the persistent list if they were previously in it. If # a device is listed both here and in always_include, it # will be included, but don't do that, you'll just drive # yourself or someone else mad with dissonant confusion. [bit_babbler] group adm env.persist_devices yes bit-babbler-0.9/munin/bit_babbler.in0000644000000000000000000013357714136173163014347 0ustar #!/usr/bin/perl -w # This file is distributed as part of the bit-babbler package. 
# Copyright 2014 - 2021, Ron # Munin magic markers #%# family=auto #%# capabilities=autoconf use strict; use IO::Socket; use JSON::XS; use Munin::Plugin; my $control_socket = $ENV{'control_socket'} || "@SEEDD_CONTROL_SOCKET@"; my $json; sub cmd_request($) { #{{{ my $request = shift; my $sock = IO::Socket::UNIX->new( Type => SOCK_STREAM, Peer => $control_socket ) or die "Could not create socket: $!\n"; my $max_chunk_size = 65536; my $data; my $msg; my $flags; $sock->send('"' . $request . "\"\0") or die "Failed to send '$request' request: $!\n"; do { $sock->recv($data,$max_chunk_size,$flags) or die "Failed to read reply: $!\n"; $msg .= $data; } while( $data !~ /\0$/ ); chop($msg); $json = eval { JSON::XS->new->decode($msg) }; die "JSON decode failed: $@: $msg\n" if $@; if ($json->[0] ne $request) { die "Unrecognised reply: $json->[0]\n"; } } #}}} sub get_ids() { cmd_request("GetIDs"); } sub get_stats() { cmd_request("ReportStats"); } sub unique_list(@) { my %h; map { $h{$_}++ ? () : $_ } @_; } sub report_bitrate_config(@) { #{{{ print "multigraph bb_bitrate\n"; print "graph_title BitBabbler bytes output\n"; print "graph_vlabel Bytes/second\n"; print "graph_category system\n"; for (@_) { my $f = clean_fieldname($_); print "${f}_qa_passed.label $_\n"; print "${f}_qa_passed.type COUNTER\n"; print "${f}_qa_passed.max 1000000\n"; print "${f}_qa_passed.info Good entropy output\n"; } for (@_) { my $f = clean_fieldname($_); print "multigraph bb_bitrate.output_$f\n"; print "graph_title BitBabbler $_ bytes output\n"; print "graph_vlabel Bytes/second\n"; print "graph_category system\n"; print "graph_info This graph shows the demand for entropy (and the rate at " . "which it is able to be delivered). The discarded rate is entropy which " . "was read from the device, but which was not used because either the " . "QA checks are currently failing, or they are still confirming whether " . "a failure was really a transient anomaly or not. It is not unusual to " . "have some entropy discarded at first start up, since the QA checking " . "puts the initial onus on the source to prove it is good, and will not " . "pass entropy from it until it does. The passed rate includes entropy " . "that is output even when none is being consumed, which is used to keep " . "the pools constantly fresh.\n"; print "${f}_qa_passed.label Passed\n"; print "${f}_qa_passed.type COUNTER\n"; print "${f}_qa_passed.max 1000000\n"; print "${f}_qa_passed.info Good entropy output\n"; print "${f}_qa_unpassed.label Discarded\n"; print "${f}_qa_unpassed.type COUNTER\n"; print "${f}_qa_unpassed.max 1000000\n"; print "${f}_qa_unpassed.warning 1\n"; print "${f}_qa_unpassed.info Discarded entropy\n"; } } #}}} sub report_bitrate_values(@) { #{{{ print "multigraph bb_bitrate\n"; for (@_) { my $f = clean_fieldname($_); my $qa = $json->[2]{$_}{'QA'} if exists $json->[2]{$_}; print "${f}_qa_passed.value " . ($qa ? $qa->{'BytesPassed'} : "U") . "\n"; } for (@_) { my $f = clean_fieldname($_); my $qa = $json->[2]{$_}{'QA'} if exists $json->[2]{$_}; print "multigraph bb_bitrate.output_$f\n"; if (defined $qa) { print "${f}_qa_passed.value $qa->{'BytesPassed'}\n"; print "${f}_qa_unpassed.value " . ($qa->{'BytesAnalysed'} - $qa->{'BytesPassed'}) . "\n"; } else { print "${f}_qa_passed.value U\n"; print "${f}_qa_unpassed.value U\n"; } } } #}}} sub report_ent_config(@) { #{{{ # Check if the Ent test long statistics are expected to have converged on # their threshold limits. We don't want to bark warnings here until then. 
# # This is a bit ugly, it means we fetch the stats used to output the values # during both the config and the fetch phases. We could cache them during # fetch, but that's backward and the first config run will potentially read # stale data from the cache (triggering a burst of false warnings, which is # exactly what this extra complication is here to solve ...). We could # instead cache them here, and read that during fetch -- but it's not clear # that's actually simpler or in any way more efficient than just querying # for them twice ... We could also add a simpler request to the protocol, # that only fetches the sample counts we need to inspect here, but that may # be overengineering our way around this too. # # What we do here then, is check if Ent::Limits::long_minsamples has been # reached yet for each device we are graphing, and flag each of them that # have. We then use that to decide whether to include the warning limits # in the config for each graph. This avoids the situation where devices # that aren't having a lot of data read from them, and which may take many # hours, or even days, to reach the long test convergence thresholds, will # be reporting 'false' warnings each time the process is restarted until # that finally happens. With this, the warnings will only trigger in the # cases where the test is considered to have actually failed and the output # from the device is being suppressed. Which is the only case we really # want to alert the admin about. Crying wolf when the test is not actually # valid yet will just lead someone to ignore a real failure - and relaxing # the thresholds for warnings here to avoid that would mean a real failure # might go unnoticed for a longer period than would be ideal, or be harder # to pinpoint the real cause when some other metric alerts them to it. # # So it's better to be a little bit ugly here, than a lot ugly for users. # This could be a lot easier if munin didn't split each request into two # separate phases that both occur for every poll. my %warn; get_stats(); for (keys %{$json->[2]}) { my $f = clean_fieldname($_); my $ent8 = $json->[2]{$_}{'Ent8'}; my $ent16 = $json->[2]{$_}{'Ent16'}; $warn{$f}{'Ent8'} = 1 if (defined $ent8 && $ent8->{'Long'}{'Samples'} > 250000000); $warn{$f}{'Ent16'} = 1 if (defined $ent16 && $ent16->{'Long'}{'Samples'} > 500000000); } print "multigraph bb_ent\n"; print "graph_title BitBabbler Ent tests\n"; print "graph_args --alt-autoscale --alt-y-grid\n"; print "graph_vlabel Shannon entropy (per 8 bits)\n"; print "graph_scale no\n"; print "graph_printf %9.6lf\n"; print "graph_category system\n"; for (@_) { my $f = clean_fieldname($_); print "${f}_ent_entropy_short.label $_ short term\n"; print "${f}_ent_entropy_short.info Short term entropy estimate\n"; print "${f}_ent_entropy_long.label $_ long term\n"; print "${f}_ent_entropy_long.info Long term entropy estimate\n"; } for (@_) { my $f = clean_fieldname($_); print "multigraph bb_ent.chisq16_$f\n"; print "graph_title BitBabbler $_ Chi^2 distribution (16-bit)\n"; print "graph_args --alt-autoscale" . " 'HRULE:66659.52#ffaaaa:Random will exceed 66659.52 less than 0.1% of the time'" . " 'COMMENT: \\j'" . " 'HRULE:64421.97#ffaaaa:Random will exceed 64421.97 more than 99.9% of the time'" . " 'COMMENT: \\j'" . "\n"; print "graph_vlabel Chi^2\n"; print "graph_scale no\n"; print "graph_printf %8.2lf\n"; print "graph_category system\n"; print "graph_info This graph shows the results of Pearson's Chi-squared test " . "for short and long sequences of 16-bit samples. 
The short term result is " . "a test of the 100 million most recently generated samples. The long term " . "result is computed over all samples generated since the process being " . "queried began.

" . "A statistically random sequence would be expected to exceed 64421.97 99.9% " . "of the time, 64695.73 99% of the time, and 64940.64 95% of the time." . " A Chi-squared statistic smaller than this indicates the sample values " . "were more uniformly distributed than would normally be expected from a " . "random selection.

" . "At the opposite end of expectation, it is likely to exceed 66131.63 only " . "5% of the time, 66380.17 1% of the time, and 66659.52 just 0.1% of the time." . " A Chi-squared statistic larger than this indicates the sample values " . "were less uniformly distributed than would normally be expected from a " . "random selection.

" . "A sustained rate of results outside of these bounds for the short term " . "test would indicate a systemic failure. Since the long term test is " . "continually accumulating upon the same set of data, it may be expected " . "to take fairly long duration excursions out to the extreme limits of " . "probability before eventually returning to a more expected range.\n"; # Roughly 1 in 100 million chance of passing the warning thresholds # Roughly buckley's of passing the critical thresholds in normal operation print "${f}_ent16_chisq_short.label Short term\n"; print "${f}_ent16_chisq_short.line " . "64940.644:cccccc:Random will exceed 64940 more than 95% of the time\n"; print "${f}_ent16_chisq_short.warning 321:67459\n"; print "${f}_ent16_chisq_short.critical 35:70000\n"; print "${f}_ent16_chisq_short.info Short term Chi^2 distribution\n"; print "${f}_ent16_chisq_long.label Long term\n"; print "${f}_ent16_chisq_long.line " . "66131.632:cccccc:Random will exceed 66131 less than 5% of the time\n"; print "${f}_ent16_chisq_long.warning 63823:67265\n"; print "${f}_ent16_chisq_long.critical 35:70000\n"; print "${f}_ent16_chisq_long.info Long term Chi^2 distribution\n"; } for (@_) { my $f = clean_fieldname($_); print "multigraph bb_ent.chisq_$f\n"; print "graph_title BitBabbler $_ Chi^2 distribution (8-bit)\n"; print "graph_args --alt-autoscale" . " 'HRULE:330.523#ffaaaa:Random will exceed 330.523 less than 0.1% of the time'" . " 'COMMENT: \\j'" . " 'HRULE:190.869#ffaaaa:Random will exceed 190.869 more than 99.9% of the time'" . " 'COMMENT: \\j'" . "\n"; print "graph_vlabel Chi^2\n"; print "graph_scale no\n"; print "graph_printf %8.2lf\n"; print "graph_category system\n"; print "graph_info This graph shows the results of Pearson's Chi-squared test " . "for short and long sequences of 8-bit samples. The short term result is " . "a test of the 500,000 most recently generated samples. The long term " . "result is computed over all samples generated since the process being " . "queried began.

" . "A statistically random sequence would be expected to exceed 190.869 99.9% " . "of the time, 205.421 99% of the time, and 219.025 95% of the time." . " A Chi-squared statistic smaller than this indicates the sample values " . "were more uniformly distributed than would normally be expected from a " . "random selection.

" . "At the opposite end of expectation, it is likely to exceed 293.248 only " . "5% of the time, 310.457 1% of the time, and 330.523 just 0.1% of the time." . " A Chi-squared statistic larger than this indicates the sample values " . "were less uniformly distributed than would normally be expected from a " . "random selection.

" . "A sustained rate of results outside of these bounds for the short term " . "test would indicate a systemic failure. Since the long term test is " . "continually accumulating upon the same set of data, it may be expected " . "to take fairly long duration excursions out to the extreme limits of " . "probability before eventually returning to a more expected range.\n"; # Roughly 1 in 100 million chance of passing the warning thresholds # Roughly buckley's of passing the critical thresholds in normal operation print "${f}_ent_chisq_short.label Short term\n"; print "${f}_ent_chisq_short.line " . "219.025:cccccc:Random will exceed 219.025 more than 95% of the time\n"; print "${f}_ent_chisq_short.warning 147:400\n"; print "${f}_ent_chisq_short.critical 32:500\n"; print "${f}_ent_chisq_short.info Short term Chi^2 distribution\n"; print "${f}_ent_chisq_long.label Long term\n"; print "${f}_ent_chisq_long.line " . "293.248:cccccc:Random will exceed 293.248 less than 5% of the time\n"; print "${f}_ent_chisq_long.warning 161:377\n"; print "${f}_ent_chisq_long.critical 32:500\n"; print "${f}_ent_chisq_long.info Long term Chi^2 distribution\n"; } for (@_) { my $f = clean_fieldname($_); print "multigraph bb_ent.entropy16_$f\n"; print "graph_title BitBabbler $_ estimated entropy (16-bit)\n"; print "graph_args --alt-autoscale --alt-y-grid\n"; print "graph_vlabel Entropy (per 16 bits)\n"; print "graph_scale no\n"; print "graph_printf %9.6lf\n"; print "graph_category system\n"; print "graph_info This graph shows the calculated Shannon and min entropy " . "for a short term sequence of the most recent 100 million samples, and " . "over the long term of all samples generated since the process being " . "queried began. The Shannon entropy is based on the number of times " . "that each possible sequence of 16 bits occurred. The min-entropy is " . "a more conservative estimate that is based only on the number of " . "times that the most frequent sample value was seen.

" . "Note that when this analysis is performed on a 32-bit machine, the " . "long term sample count will 'wrap around' well before the results " . "can converge on their maximum expected value, and that when this does " . "occur a small 'sawtooth' dip is expected to be seen in the results.\n"; print "${f}_ent16_entropy_short.label Shannon entropy short term\n"; print "${f}_ent16_entropy_short.warning 15.9995:\n"; print "${f}_ent16_entropy_short.critical 15.8:\n"; print "${f}_ent16_entropy_short.info Short term Shannon entropy estimate\n"; print "${f}_ent16_entropy_long.label Shannon entropy long term\n"; if (defined $warn{$f}{'Ent16'}) { print "${f}_ent16_entropy_long.warning 15.9999:\n"; print "${f}_ent16_entropy_long.critical 15.99:\n"; } print "${f}_ent16_entropy_long.info Long term Shannon entropy estimate\n"; print "${f}_ent16_minentropy_short.label Min-entropy short term\n"; print "${f}_ent16_minentropy_short.warning 15.708:\n"; print "${f}_ent16_minentropy_short.critical 15.7:\n"; print "${f}_ent16_minentropy_short.info Short term min-entropy estimate\n"; print "${f}_ent16_minentropy_long.label Min-entropy long term\n"; if (defined $warn{$f}{'Ent16'}) { print "${f}_ent16_minentropy_long.warning 15.893:\n"; print "${f}_ent16_minentropy_long.critical 15.8:\n"; } print "${f}_ent16_minentropy_long.info Long term min-entropy estimate\n"; } for (@_) { my $f = clean_fieldname($_); print "multigraph bb_ent.entropy_$f\n"; print "graph_title BitBabbler $_ estimated entropy (8-bit)\n"; print "graph_args --alt-autoscale --alt-y-grid\n"; print "graph_vlabel Entropy (per 8 bits)\n"; print "graph_scale no\n"; print "graph_printf %9.6lf\n"; print "graph_category system\n"; print "graph_info This graph shows the calculated Shannon and min entropy " . "for a short term sequence of the most recent 500,000 samples, and " . "over the long term of all samples generated since the process being " . "queried began. The Shannon entropy is based on the number of times " . "that each possible sequence of 8 bits occurred. The min-entropy is " . "a more conservative estimate that is based only on the number of " . "times that the most frequent sample value was seen.

" . "Note that when this analysis is performed on a 32-bit machine, the " . "long term sample count will 'wrap around' well before the min entropy " . "results can converge on their maximum expected value, and that when " . "this does occur a tiny 'sawtooth' dip is expected to be seen in the " . "results.\n"; print "${f}_ent_entropy_short.label Shannon entropy short term\n"; print "${f}_ent_entropy_short.warning 7.999:\n"; print "${f}_ent_entropy_short.critical 7.8:\n"; print "${f}_ent_entropy_short.info Short term Shannon entropy estimate\n"; print "${f}_ent_entropy_long.label Shannon entropy long term\n"; if (defined $warn{$f}{'Ent8'}) { print "${f}_ent_entropy_long.warning 7.999999:\n"; print "${f}_ent_entropy_long.critical 7.999:\n"; } print "${f}_ent_entropy_long.info Long term Shannon entropy estimate\n"; print "${f}_ent_minentropy_short.label Min-entropy short term\n"; print "${f}_ent_minentropy_short.warning 7.73:\n"; print "${f}_ent_minentropy_short.critical 7.7:\n"; print "${f}_ent_minentropy_short.info Short term min-entropy estimate\n"; print "${f}_ent_minentropy_long.label Min-entropy long term\n"; if (defined $warn{$f}{'Ent8'}) { print "${f}_ent_minentropy_long.warning 7.99:\n"; print "${f}_ent_minentropy_long.critical 7.9:\n"; } print "${f}_ent_minentropy_long.info Long term min-entropy estimate\n"; } for (@_) { my $f = clean_fieldname($_); print "multigraph bb_ent.mean16_$f\n"; print "graph_title BitBabbler $_ mean value (16-bit)\n"; if (defined $warn{$f}{'Ent16'}) { print "graph_args --alt-autoscale --alt-y-grid\n"; } else { print "graph_args --alt-autoscale --alt-y-grid" . " HRULE:32765.63#bbbbff" . " HRULE:32769.37#bbbbff" . "\n"; } print "graph_vlabel Mean of all samples\n"; print "graph_scale no\n"; print "graph_printf %10.6lf\n"; print "graph_category system\n"; print "graph_info This graph shows a simple arithmetic mean of 16-bit samples " . "over short and long term sequences. The short term result is a test " . "of the 100 million most recently generated samples. The long term result " . "is calculated over all samples generated since the process being queried " . "began. An unbiased sequence would be expected to converge on 32767.5 over " . "the long term, but the 16-bit mean can require a large number of samples " . "before it does.\n"; print "${f}_ent16_mean_short.label Short term\n"; print "${f}_ent16_mean_short.line 32767.5:bbbbbb\n"; print "${f}_ent16_mean_short.warning 32759.81:32775.19\n"; print "${f}_ent16_mean_short.critical 32757.5:32777.5\n"; print "${f}_ent16_mean_short.info Short term mean\n"; print "${f}_ent16_mean_long.label Long term\n"; if (defined $warn{$f}{'Ent16'}) { print "${f}_ent16_mean_long.warning 32765.63:32769.37\n"; print "${f}_ent16_mean_long.critical 32762.5:32772.5\n"; } print "${f}_ent16_mean_long.info Long term mean\n"; } for (@_) { my $f = clean_fieldname($_); print "multigraph bb_ent.mean_$f\n"; print "graph_title BitBabbler $_ mean value (8-bit)\n"; if (defined $warn{$f}{'Ent8'}) { print "graph_args --alt-autoscale --alt-y-grid\n"; } else { print "graph_args --alt-autoscale --alt-y-grid" . " HRULE:127.481#bbbbff" . " HRULE:127.519#bbbbff" . "\n"; } print "graph_vlabel Mean of all samples\n"; print "graph_scale no\n"; print "graph_printf %10.6lf\n"; print "graph_category system\n"; print "graph_info This graph shows a simple arithmetic mean of 8-bit samples " . "over short and long term sequences. The short term result is a test " . "of the 500,000 most recently generated samples. The long term result " . 
"is calculated over all samples generated since the process being queried " . "began. An unbiased sequence would be expected to converge on 127.5 over " . "the long term.\n"; print "${f}_ent_mean_short.label Short term\n"; print "${f}_ent_mean_short.warning 126.92:128.08\n"; print "${f}_ent_mean_short.critical 126.5:128.5\n"; print "${f}_ent_mean_short.info Short term mean\n"; print "${f}_ent_mean_long.label Long term\n"; if (defined $warn{$f}{'Ent8'}) { print "${f}_ent_mean_long.warning 127.481:127.519\n"; print "${f}_ent_mean_long.critical 127.0:128.0\n"; } print "${f}_ent_mean_long.info Long term mean\n"; } for (@_) { my $f = clean_fieldname($_); print "multigraph bb_ent.pi_error_$f\n"; print "graph_title BitBabbler $_ Monte Carlo test (24-bit)\n"; if (defined $warn{$f}{'Ent8'}) { print "graph_args --base 1000\n"; # Don't inherit parent args } else { print "graph_args --base 1000 HRULE:-0.03#bbbbff HRULE:0.03#bbbbff\n"; } print "graph_vlabel % error calculating Pi\n"; print "graph_scale no\n"; print "graph_printf %6.4lf\n"; print "graph_category system\n"; print "graph_info This graph shows the error in computing the value of " . "Pi using the 'Monte Carlo Method'. Consecutive sequences of " . "24 bits are taken as X and Y coordinates inside a square. " . "Since a circle inscribed in that square occupies Pi/4 of its " . "area, then a uniformly distributed set of random points should " . "fall inside or outside the radius of the circle with a ratio " . "that when multiplied by 4 gives an approximation for Pi. The " . "short term result is a test of the most recent 500,000 samples. " . "The long term result is computed over all samples generated since " . "the process being queried began. The results are graphed as the " . "percentage of error relative to the real value of Pi. This test " . "is relatively slow to converge on an accurate estimation, but a " . "sustained or persistently diverging inaccuracy in the estimation " . "would indicate a systemic error in the uniformity of the sample " . "values.\n"; print "${f}_ent_pi_error_short.label Short term\n"; print "${f}_ent_pi_error_short.warning -0.97:0.97\n"; print "${f}_ent_pi_error_short.critical -2.0:2.0\n"; print "${f}_ent_pi_error_short.info Short term error percentage\n"; print "${f}_ent_pi_error_long.label Long term\n"; if (defined $warn{$f}{'Ent8'}) { print "${f}_ent_pi_error_long.warning -0.03:0.03\n"; print "${f}_ent_pi_error_long.critical -1.0:1.0\n"; } print "${f}_ent_pi_error_long.info Long term error percentage\n"; } for (@_) { my $f = clean_fieldname($_); print "multigraph bb_ent.autocorr16_$f\n"; print "graph_title BitBabbler $_ serial correlation (16-bit)\n"; if (defined $warn{$f}{'Ent16'}) { print "graph_args --base 1000\n"; # Don't inherit parent args } else { print "graph_args --base 1000 HRULE:-0.00008#bbbbff HRULE:0.00008#bbbbff\n"; } print "graph_vlabel Serial correlation coefficient\n"; print "graph_scale yes\n"; print "graph_printf %7.3lf\n"; print "graph_category system\n"; print "graph_info This graph shows the autocorrelation coefficient for " . "a lag of 1 over the sequence of samples. This gives a measure " . "of the extent to which each sample is related to the previous one. " . "A perfectly predictable stream will converge on a result of 1.0, " . "and a perfectly unpredictable one will converge on a result of 0." . "The short term result is a test of the 100 million most recently " . "generated samples. The long term result is computed over all " . "samples generated since the process being queried began. 
" . "A sustained divergence away from 0 or values close to +/- 1 " . "indicate a problem that ought to be investigated.\n"; print "${f}_ent16_autocorr_short.label Short term\n"; print "${f}_ent16_autocorr_short.warning -0.00044:0.00044\n"; print "${f}_ent16_autocorr_short.critical -0.005:0.005\n"; print "${f}_ent16_autocorr_short.info Short term serial correlation\n"; print "${f}_ent16_autocorr_long.label Long term\n"; if (defined $warn{$f}{'Ent16'}) { print "${f}_ent16_autocorr_long.warning -0.00011:0.00011\n"; print "${f}_ent16_autocorr_long.critical -0.001:0.001\n"; } print "${f}_ent16_autocorr_long.info Long term serial correlation\n"; } for (@_) { my $f = clean_fieldname($_); print "multigraph bb_ent.autocorr_$f\n"; print "graph_title BitBabbler $_ serial correlation (8-bit)\n"; if (defined $warn{$f}{'Ent8'}) { print "graph_args --base 1000\n"; # Don't inherit parent args } else { print "graph_args --base 1000 HRULE:-0.0002#bbbbff HRULE:0.0002#bbbbff\n"; } print "graph_vlabel Serial correlation coefficient\n"; print "graph_scale yes\n"; print "graph_printf %7.3lf\n"; print "graph_category system\n"; print "graph_info This graph shows the autocorrelation coefficient for " . "a lag of 1 over the sequence of samples. This gives a measure " . "of the extent to which each sample is related to the previous one. " . "A perfectly predictable stream will converge on a result of 1.0, " . "and a perfectly unpredictable one will converge on a result of 0." . "The short term result is a test of the 500,000 most recently " . "generated samples. The long term result is computed over all " . "samples generated since the process being queried began. " . "A sustained divergence away from 0 or values close to +/- 1 " . "indicate a problem that ought to be investigated.\n"; print "${f}_ent_autocorr_short.label Short term\n"; print "${f}_ent_autocorr_short.warning -0.0078:0.0078\n"; print "${f}_ent_autocorr_short.critical -0.009:0.009\n"; print "${f}_ent_autocorr_short.info Short term serial correlation\n"; print "${f}_ent_autocorr_long.label Long term\n"; if (defined $warn{$f}{'Ent8'}) { print "${f}_ent_autocorr_long.warning -0.00025:0.00025\n"; print "${f}_ent_autocorr_long.critical -0.005:0.005\n"; } print "${f}_ent_autocorr_long.info Long term serial correlation\n"; } } #}}} sub report_ent_values(@) { #{{{ print "multigraph bb_ent\n"; for (@_) { my $f = clean_fieldname($_); my $ent = $json->[2]{$_}{'Ent8'} if exists $json->[2]{$_}; if (defined $ent) { print "${f}_ent_entropy_short.value $ent->{'Short'}{'Current'}{'Entropy'}\n"; print "${f}_ent_entropy_long.value $ent->{'Long'}{'Current'}{'Entropy'}\n"; } else { print "${f}_ent_entropy_short.value U\n"; print "${f}_ent_entropy_long.value U\n"; } } for my $n ('', '16') { my $e = $n ? 
'Ent16' : 'Ent8'; for (@_) { my $f = clean_fieldname($_); my $ent = $json->[2]{$_}{$e} if exists $json->[2]{$_}; print "multigraph bb_ent.chisq${n}_$f\n"; if (defined $ent) { print "${f}_ent${n}_chisq_short.value $ent->{'Short'}{'Current'}{'Chisq'}\n"; print "${f}_ent${n}_chisq_long.value $ent->{'Long'}{'Current'}{'Chisq'}\n"; } else { print "${f}_ent${n}_chisq_short.value U\n"; print "${f}_ent${n}_chisq_long.value U\n"; } } for (@_) { my $f = clean_fieldname($_); my $ent = $json->[2]{$_}{$e} if exists $json->[2]{$_}; print "multigraph bb_ent.entropy${n}_$f\n"; if (defined $ent) { print "${f}_ent${n}_entropy_short.value $ent->{'Short'}{'Current'}{'Entropy'}\n"; print "${f}_ent${n}_entropy_long.value $ent->{'Long'}{'Current'}{'Entropy'}\n"; print "${f}_ent${n}_minentropy_short.value $ent->{'Short'}{'Current'}{'MinEntropy'}\n"; print "${f}_ent${n}_minentropy_long.value $ent->{'Long'}{'Current'}{'MinEntropy'}\n"; } else { print "${f}_ent${n}_entropy_short.value U\n"; print "${f}_ent${n}_entropy_long.value U\n"; print "${f}_ent${n}_minentropy_short.value U\n"; print "${f}_ent${n}_minentropy_long.value U\n"; } } for (@_) { my $f = clean_fieldname($_); my $ent = $json->[2]{$_}{$e} if exists $json->[2]{$_}; print "multigraph bb_ent.mean${n}_$f\n"; if (defined $ent) { print "${f}_ent${n}_mean_short.value $ent->{'Short'}{'Current'}{'Mean'}\n"; print "${f}_ent${n}_mean_long.value $ent->{'Long'}{'Current'}{'Mean'}\n"; } else { print "${f}_ent${n}_mean_short.value U\n"; print "${f}_ent${n}_mean_long.value U\n"; } } for (@_) { my $f = clean_fieldname($_); my $ent = $json->[2]{$_}{$e} if exists $json->[2]{$_}; print "multigraph bb_ent.autocorr${n}_$f\n"; if (defined $ent) { print "${f}_ent${n}_autocorr_short.value $ent->{'Short'}{'Current'}{'Autocorr'}\n"; print "${f}_ent${n}_autocorr_long.value $ent->{'Long'}{'Current'}{'Autocorr'}\n"; } else { print "${f}_ent${n}_autocorr_short.value U\n"; print "${f}_ent${n}_autocorr_long.value U\n"; } } } for (@_) { my $f = clean_fieldname($_); my $ent = $json->[2]{$_}{'Ent8'} if exists $json->[2]{$_}; print "multigraph bb_ent.pi_error_$f\n"; if (defined $ent) { print "${f}_ent_pi_error_short.value $ent->{'Short'}{'Current'}{'Pi-error'}\n"; print "${f}_ent_pi_error_long.value $ent->{'Long'}{'Current'}{'Pi-error'}\n"; } else { print "${f}_ent_pi_error_short.value U\n"; print "${f}_ent_pi_error_long.value U\n"; } } } #}}} sub report_fips_pass_config(@) { #{{{ print "multigraph bb_fips_pass\n"; print "graph_title BitBabbler FIPS 140-2 pass run length\n"; print "graph_vlabel Consecutive tests without failure\n"; print "graph_scale no\n"; print "graph_printf %6.0lf\n"; print "graph_category system\n"; print "graph_info This graph shows the run length between FIPS 140-2 test " . "failures. A correctly working system should expect to see failure of " . "the FIPS 140-2 tests about once in every 1250 blocks tested on average." . " Occasional runs of much longer than that can be reasonably expected, " . "with a run of 17500 or longer expected about once in 1.2 million tests " . "(about 3.5TB of samples). A sustained lack of failures would indicate " . 
"a problem that ought to be investigated.\n"; for (@_) { my $f = clean_fieldname($_); print "${f}_pass_avg_short.label $_\n"; print "${f}_pass_avg_short.info Short term average run of tests without failure\n"; } print "multigraph bb_fips_pass.longest\n"; print "graph_title BitBabbler FIPS 140-2 longest pass run\n"; print "graph_scale no\n"; print "graph_printf %6.0lf\n"; print "graph_category system\n"; print "graph_info This graph shows the longest run of consecutive blocks " . "without a FIPS 140-2 test failure, since the process being queried " . "began. A run of 17500 or longer is expected about once in 1.2 million " . "blocks tested (about 3.5TB of samples), but runs longer than that are " . "not impossible, just increasingly rare. The average rate graph is a " . "better measure of correct operation than this one, but consistently " . "unusual results for the peak run length would be something that ought " . "to be investigated more closely.\n"; for (@_) { my $f = clean_fieldname($_); print "${f}_pass_max.label $_\n"; print "${f}_pass_max.info Longest run of tests without failure\n"; } for (@_) { my $f = clean_fieldname($_); print "multigraph bb_fips_pass.qa_$f\n"; print "graph_title BitBabbler $_ FIPS 140-2 pass run length\n"; print "graph_scale no\n"; print "graph_printf %6.0lf\n"; print "graph_category system\n"; print "${f}_pass_avg_short.label Short term average\n"; print "${f}_pass_avg_short.warning 20000\n"; print "${f}_pass_avg_short.info Average run of tests without failure\n"; print "${f}_pass_avg_long.label Long term average\n"; print "${f}_pass_avg_long.warning 20000\n"; print "${f}_pass_avg_long.info Average run of tests without failure\n"; } } #}}} sub report_fips_pass_values(@) { #{{{ print "multigraph bb_fips_pass\n"; for (@_) { my $f = clean_fieldname($_); my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; print "${f}_pass_avg_short.value " . ($fips ? $fips->{'Result'}{'PassRuns'}{'Short'} : "U") . "\n"; } print "multigraph bb_fips_pass.longest\n"; for (@_) { my $f = clean_fieldname($_); my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; print "${f}_pass_max.value " . ($fips ? $fips->{'Result'}{'PassRuns'}{'Peak'} : "U") . "\n"; } for (@_) { my $f = clean_fieldname($_); my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; print "multigraph bb_fips_pass.qa_$f\n"; if (defined $fips) { print "${f}_pass_avg_short.value $fips->{'Result'}{'PassRuns'}{'Short'}\n"; print "${f}_pass_avg_long.value $fips->{'Result'}{'PassRuns'}{'Long'}\n"; } else { print "${f}_pass_avg_short.value U\n"; print "${f}_pass_avg_long.value U\n"; } } } #}}} sub report_fips_fail_config(@) { #{{{ print "multigraph bb_fips_fail\n"; print "graph_title BitBabbler FIPS 140-2 testing\n"; print "graph_vlabel Failed per 1000: long(-) / short(+) term\n"; print "graph_scale no\n"; print "graph_printf %6.4lf\n"; print "graph_category system\n"; print "graph_info This graph shows the long and short term failure rates " . "for the FIPS 140-2 tests on each source. The short term average " . "tracks a window of the last 1000 tests. A correctly working system " . "should expect to converge on just under 0.8 failures per thousand as " . "the long term trend, with with the short term average varying from 0 " . "with occasional peaks over 5 (as the rare, but not quite infinitely " . "improbable, rough upper bound). A sustained short term rate greater " . 
"than that would indicate a systemic failure.\n"; my $first = 1; for (@_) { my $f = clean_fieldname($_); print "${f}_l.label $_\n"; print "${f}_l.graph no\n"; print "${f}_l.line -0.829:bbbbbb\n" if $first; print "${f}_l.info Long term average rate of failures\n"; print "${f}_s.label $_\n"; print "${f}_s.negative ${f}_l\n"; print "${f}_s.line 0.829:bbbbbb:Expected average rate\n" if $first; print "${f}_s.info Short term rolling average rate of failures\n"; $first = 0; } for (@_) { my $f = clean_fieldname($_); print "multigraph bb_fips_fail.qa_$f\n"; print "graph_title BitBabbler $_ average FIPS 140-2 failure rate\n"; print "graph_vlabel Failed per 1000: long(-) / short(+) term\n"; print "graph_scale no\n"; print "graph_printf %6.4lf\n"; print "graph_category system\n"; print "graph_info This graph shows the long and short term failure rates " . "for the FIPS 140-2 tests. The short term average tracks a window " . "of the last 1000 tests. A correctly working system should expect " . "to converge on around 0.8 failures per thousand (of any test) as " . "the long term trend, with with the short term average varying from " . "0 with occasional peaks over 5 (as the rare, but not infinitely " . "improbable, rough upper bound). A sustained short term rate greater " . "than that would indicate a systemic failure. The expected (long term) " . "rates of failure for each individual test are indicated below.\n"; print "${f}_l.label Failure rate\n"; print "${f}_l.graph no\n"; print "${f}_l.line -0.829:bbbbbb\n"; print "${f}_s.label Failure rate\n"; print "${f}_s.negative ${f}_l\n"; print "${f}_s.line 0.829:bbbbbb:Expected average rate\n"; print "${f}_s.info Failure of any test\n"; print "${f}_s.warning 5.5\n"; print "${f}_s.critical 10.0\n"; print "${f}_monl.label Monobit\n"; print "${f}_monl.graph no\n"; print "${f}_mons.label Monobit\n"; print "${f}_mons.negative ${f}_monl\n"; print "${f}_mons.info Expect 0.104 per 1000\n"; print "${f}_pokl.label Poker\n"; print "${f}_pokl.graph no\n"; print "${f}_poks.label Poker\n"; print "${f}_poks.negative ${f}_pokl\n"; print "${f}_poks.info Expect 0.099 per 1000\n"; print "${f}_runl.label Runs\n"; print "${f}_runl.graph no\n"; print "${f}_runs.label Runs\n"; print "${f}_runs.negative ${f}_runl\n"; print "${f}_runs.info Expect 0.328 per 1000\n"; print "${f}_lrl.label Long run\n"; print "${f}_lrl.graph no\n"; print "${f}_lrs.label Long run\n"; print "${f}_lrs.negative ${f}_lrl\n"; print "${f}_lrs.info Expect 0.298 per 1000\n"; print "${f}_repl.label Repetition\n"; print "${f}_repl.graph no\n"; print "${f}_reps.label Repetition\n"; print "${f}_reps.negative ${f}_repl\n"; print "${f}_reps.info Expect to be very rare\n"; } for (@_) { my $f = clean_fieldname($_); print "multigraph bb_fips_fail.peak_$f\n"; print "graph_title BitBabbler $_ peak FIPS 140-2 failure rate\n"; print "graph_vlabel Max failure rate (per 1000 tests)\n"; print "graph_scale no\n"; print "graph_printf %6.4lf\n"; print "graph_category system\n"; print "graph_info This graph shows the worst case failure rates for the " . "FIPS 140-2 tests since the process we are querying began. These " . "are the peak values seen as the short term average over a window " . 
"of the last 1000 tests.\n"; print "${f}_p.label Failure rate\n"; print "${f}_p.info Failure of any test\n"; print "${f}_monp.label Monobit\n"; print "${f}_pokp.label Poker\n"; print "${f}_runp.label Runs\n"; print "${f}_lrp.label Long run\n"; print "${f}_repp.label Repetition\n"; } } #}}} sub report_fips_fail_values(@) { #{{{ print "multigraph bb_fips_fail\n"; for (@_) { my $f = clean_fieldname($_); my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; if (defined $fips) { print "${f}_l.value " . $fips->{'Result'}{'FailRate'}{'Long'} * 1000 . "\n"; print "${f}_s.value " . $fips->{'Result'}{'FailRate'}{'Short'} * 1000 . "\n"; } else { print "${f}_l.value U\n"; print "${f}_s.value U\n"; } } for (@_) { my $f = clean_fieldname($_); my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; print "multigraph bb_fips_fail.qa_$f\n"; if (defined $fips) { print "${f}_l.value " . $fips->{'Result'}{'FailRate'}{'Long'} * 1000 . "\n"; print "${f}_s.value " . $fips->{'Result'}{'FailRate'}{'Short'} * 1000 . "\n"; print "${f}_monl.value " . $fips->{'Monobit'}{'FailRate'}{'Long'} * 1000 . "\n"; print "${f}_mons.value " . $fips->{'Monobit'}{'FailRate'}{'Short'} * 1000 . "\n"; print "${f}_pokl.value " . $fips->{'Poker'}{'FailRate'}{'Long'} * 1000 . "\n"; print "${f}_poks.value " . $fips->{'Poker'}{'FailRate'}{'Short'} * 1000 . "\n"; print "${f}_runl.value " . $fips->{'Runs'}{'FailRate'}{'Long'} * 1000 . "\n"; print "${f}_runs.value " . $fips->{'Runs'}{'FailRate'}{'Short'} * 1000 . "\n"; print "${f}_lrl.value " . $fips->{'Long run'}{'FailRate'}{'Long'} * 1000 . "\n"; print "${f}_lrs.value " . $fips->{'Long run'}{'FailRate'}{'Short'} * 1000 . "\n"; print "${f}_repl.value " . $fips->{'Repetition'}{'FailRate'}{'Long'} * 1000 . "\n"; print "${f}_reps.value " . $fips->{'Repetition'}{'FailRate'}{'Short'} * 1000 . "\n"; } else { print "${f}_l.value U\n"; print "${f}_s.value U\n"; print "${f}_monl.value U\n"; print "${f}_mons.value U\n"; print "${f}_pokl.value U\n"; print "${f}_poks.value U\n"; print "${f}_runl.value U\n"; print "${f}_runs.value U\n"; print "${f}_lrl.value U\n"; print "${f}_lrs.value U\n"; print "${f}_repl.value U\n"; print "${f}_reps.value U\n"; } } for (@_) { my $f = clean_fieldname($_); my $fips = $json->[2]{$_}{'FIPS'} if exists $json->[2]{$_}; print "multigraph bb_fips_fail.peak_$f\n"; if (defined $fips) { print "${f}_p.value " . $fips->{'Result'}{'FailRate'}{'Peak'} * 1000 . "\n"; print "${f}_monp.value " . $fips->{'Monobit'}{'FailRate'}{'Peak'} * 1000 . "\n"; print "${f}_pokp.value " . $fips->{'Poker'}{'FailRate'}{'Peak'} * 1000 . "\n"; print "${f}_runp.value " . $fips->{'Runs'}{'FailRate'}{'Peak'} * 1000 . "\n"; print "${f}_lrp.value " . $fips->{'Long run'}{'FailRate'}{'Peak'} * 1000 . "\n"; print "${f}_repp.value " . $fips->{'Repetition'}{'FailRate'}{'Peak'} * 1000 . 
"\n"; } else { print "${f}_p.value U\n"; print "${f}_monp.value U\n"; print "${f}_pokp.value U\n"; print "${f}_runp.value U\n"; print "${f}_lrp.value U\n"; print "${f}_repp.value U\n"; } } } #}}} sub report_config() { #{{{ my $persist = $ENV{'persist_devices'} || "no"; my @sources; @sources = restore_state() if $persist eq "yes"; eval { get_ids(); @sources = unique_list(@sources, @{$json->[2]}); }; if (exists $ENV{'always_ignore'}) { my %ignore; my @remains; @ignore{split(' ',$ENV{'always_ignore'})} = (); for (@sources) { push(@remains, $_) unless exists $ignore{$_}; } @sources = @remains; } save_state(@sources) if $persist eq "yes"; @sources = unique_list(@sources, split(' ',$ENV{'always_include'})) if $ENV{'always_include'}; report_bitrate_config(@sources); report_ent_config(@sources); report_fips_pass_config(@sources); report_fips_fail_config(@sources); } #}}} sub report_values() { #{{{ get_stats(); my @sources; if (($ENV{'persist_devices'} || "") eq "yes") { @sources = restore_state(); } else { @sources = keys %{$json->[2]}; if (exists $ENV{'always_ignore'}) { my %ignore; my @remains; @ignore{split(' ',$ENV{'always_ignore'})} = (); for (@sources) { push(@remains, $_) unless exists $ignore{$_}; } @sources = @remains; } } @sources = unique_list(@sources, split(' ',$ENV{'always_include'})) if $ENV{'always_include'}; report_bitrate_values(@sources); report_ent_values(@sources); report_fips_pass_values(@sources); report_fips_fail_values(@sources); } #}}} if (!defined $ARGV[0]) { report_values(); } elsif ($ARGV[0] eq "config") { report_config(); } elsif ($ARGV[0] eq "autoconf") { # If the package providing this is installed, we presume you're going to # want it enabled if munin-node is also installed. We could make this a # bit more nuanced if this script is ever installed on a lot of systems # where that isn't likely to be true. print "yes\n"; } # vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/private_setup.h.in0000644000000000000000000002713014136173163014075 0ustar /* private_setup.h.in. Generated from configure.ac by autoheader. */ /* Define if building universal (internal helper macro) */ #undef AC_APPLE_UNIVERSAL_BUILD /* Macro for function attribute 'cold' */ #undef BB_COLD /* Macro for function attribute 'const' */ #undef BB_CONST /* Macro for statement attribute 'fallthrough' */ #undef BB_FALLTHROUGH /* Macro for function attribute 'noreturn' */ #undef BB_NORETURN /* Macro for function attribute 'no_sanitize("float-divide-by-zero")' */ #undef BB_NO_SANITIZE_FLOAT_DIVIDE_BY_ZERO /* Macro for function attribute 'no_sanitize("unsigned-integer-overflow")' */ #undef BB_NO_SANITIZE_UNSIGNED_INTEGER_OVERFLOW /* Macro for function attribute 'format (__printf__,fmt,arg1)' */ #undef BB_PRINTF_FORMAT /* Macro for function attribute 'pure' */ #undef BB_PURE /* Macro for function attribute 'format (__strftime__,fmt,0)' */ #undef BB_STRFTIME_FORMAT /* Safely stringify a pragma option (which may already include quotes) */ #define EM_PRAGMA(p) _Pragma (#p) /* Save the current diagnostic settings, and try to add or modify some diagnostic Option to have the given Action. Action may be one of: error, warning, or ignored, setting the new disposition of Option. If Option is not recognised by the compiler this request will be silently ignored. Clang-6 stopped using -Wunknown-pragmas (which was implied by -Wpragmas) for these, so we need to silence another option for it (which in turn still needs -Wpragmas, as GCC of course doesn't support clang's new -Wunknown-warning-option. Cooperation is hard ... 
*/ #define EM_TRY_PUSH_DIAGNOSTIC( Action, Option ) \ EM_PRAGMA(GCC diagnostic push) \ EM_PRAGMA(GCC diagnostic ignored "-Wpragmas") \ EM_PRAGMA(GCC diagnostic ignored "-Wunknown-warning-option") \ EM_PRAGMA(GCC diagnostic Action Option) /* Save the current diagnostic settings, and try to add or modify some diagnostic Option to have the given Action. Action may be one of: error, warning, or ignored, setting the new disposition of Option. If Option is not recognised by the compiler this request may itself generate a compile time diagnostic warning or error depending on the compiler defaults and command line options used. */ #define EM_PUSH_DIAGNOSTIC( Action, Option ) \ EM_PRAGMA(GCC diagnostic push) \ EM_PRAGMA(GCC diagnostic Action Option) /* Set the Action for additional diagnostic Options. The current settings are not pushed, so a call to EM_POP_DIAGNOSTIC will revert all changes made since the last time EM_PUSH_DIAGNOSTIC was used. */ #define EM_MORE_DIAGNOSTIC( Action, Option ) \ EM_PRAGMA(GCC diagnostic Action Option) /* Equivalent to EM_TRY_PUSH_DIAGNOSTIC( ignored, Option ) */ #define EM_TRY_PUSH_DIAGNOSTIC_IGNORE( Option ) \ EM_TRY_PUSH_DIAGNOSTIC( ignored, Option ) /* Equivalent to EM_PUSH_DIAGNOSTIC( ignored, Option ) */ #define EM_PUSH_DIAGNOSTIC_IGNORE( Option ) \ EM_PUSH_DIAGNOSTIC( ignored, Option ) /* Equivalent to EM_MORE_DIAGNOSTIC( ignored, Option ) */ #define EM_MORE_DIAGNOSTIC_IGNORE( Option ) \ EM_MORE_DIAGNOSTIC( ignored, Option ) /* Equivalent to EM_TRY_PUSH_DIAGNOSTIC( warning, Option ) */ #define EM_TRY_PUSH_DIAGNOSTIC_WARN( Option ) \ EM_TRY_PUSH_DIAGNOSTIC( warning, Option ) /* Equivalent to EM_PUSH_DIAGNOSTIC( warning, Option ) */ #define EM_PUSH_DIAGNOSTIC_WARN( Option ) \ EM_PUSH_DIAGNOSTIC( warning, Option ) /* Equivalent to EM_MORE_DIAGNOSTIC( warning, Option ) */ #define EM_MORE_DIAGNOSTIC_WARN( Option ) \ EM_MORE_DIAGNOSTIC( warning, Option ) /* Equivalent to EM_TRY_PUSH_DIAGNOSTIC( error, Option ) */ #define EM_TRY_PUSH_DIAGNOSTIC_ERROR( Option ) \ EM_TRY_PUSH_DIAGNOSTIC( error, Option ) /* Equivalent to EM_PUSH_DIAGNOSTIC( error, Option ) */ #define EM_PUSH_DIAGNOSTIC_ERROR( Option ) \ EM_PUSH_DIAGNOSTIC( error, Option ) /* Equivalent to EM_MORE_DIAGNOSTIC( error, Option ) */ #define EM_MORE_DIAGNOSTIC_ERROR( Option ) \ EM_MORE_DIAGNOSTIC( error, Option ) /* Restore the diagnostic state to what it was before the last time it was pushed. If there is no corresponding push the command-line options are restored. */ #define EM_POP_DIAGNOSTIC \ EM_PRAGMA(GCC diagnostic pop) /* build with additional debugging code */ #undef EMDEBUG /* System directory for run-time variable data */ #undef EM_SYSTEM_RUNDIR /* use gettext to localise selected literal strings */ #undef EM_USE_GETTEXT /* Build with service manager NOTIFY_SOCKET support */ #undef EM_USE_NOTIFY_SOCKET /* do extra cleanup to be valgrind clean */ #undef EM_USE_VALGRIND_FRIENDLY /* use wide characters by default for internal string storage */ #undef EM_USE_WIDE_STRINGS /* Define to 1 if translation of program messages to the user's native language is requested. 
*/ #undef ENABLE_NLS /* The fully expanded $bindir path */ #undef EXP_BINDIR /* The fully expanded $datadir path */ #undef EXP_DATADIR /* The fully expanded $docdir path */ #undef EXP_DOCDIR /* The fully expanded $exec_prefix path */ #undef EXP_EXEC_PREFIX /* The fully expanded $includedir path */ #undef EXP_INCLUDEDIR /* The fully expanded $libdir path */ #undef EXP_LIBDIR /* The fully expanded $localedir path */ #undef EXP_LOCALEDIR /* The fully expanded $mandir path */ #undef EXP_MANDIR /* The fully expanded $prefix path */ #undef EXP_PREFIX /* The fully expanded $sbindir path */ #undef EXP_SBINDIR /* Have namespace abi alias to __cxxabiv1 */ #undef HAVE_ABI_ALIAS_TO_CXXABIV1 /* Have abi::__forced_unwind support */ #undef HAVE_ABI_FORCED_UNWIND /* Workaround OpenBSD _thread_flockfile cancellation bug */ #undef HAVE_BROKEN_STDIO_LOCKING /* Define to 1 if you have the Mac OS X function CFLocaleCopyPreferredLanguages in the CoreFoundation framework. */ #undef HAVE_CFLOCALECOPYPREFERREDLANGUAGES /* Define to 1 if you have the Mac OS X function CFPreferencesCopyAppValue in the CoreFoundation framework. */ #undef HAVE_CFPREFERENCESCOPYAPPVALUE /* Define to 1 if you have the `clock_gettime' function. */ #undef HAVE_CLOCK_GETTIME /* Define if the GNU dcgettext() function is already present or preinstalled. */ #undef HAVE_DCGETTEXT /* Define to 1 if you have the declaration of `LOG_MAKEPRI', and to 0 if you don't. */ #undef HAVE_DECL_LOG_MAKEPRI /* Define to 1 if you have the declaration of `SIGRTMIN', and to 0 if you don't. */ #undef HAVE_DECL_SIGRTMIN /* Define to 1 if you have the header file. */ #undef HAVE_EXT_HASH_MAP /* Define if the GNU gettext() function is already present or preinstalled. */ #undef HAVE_GETTEXT /* Define to 1 if you have the `gettimeofday' function. */ #undef HAVE_GETTIMEOFDAY /* Define to 1 if you have the `gmtime_r' function. */ #undef HAVE_GMTIME_R /* Define if you have the iconv() function and it works. */ #undef HAVE_ICONV /* The system iconv requires a const char** second argument */ #undef HAVE_ICONV_CONST /* Define to 1 if you have the header file. */ #undef HAVE_INTTYPES_H /* libudev is available */ #undef HAVE_LIBUDEV /* libusb is available */ #undef HAVE_LIBUSB /* Define to 1 if you have the header file. */ #undef HAVE_LIBUSB_1_0_LIBUSB_H /* Define to 1 if you have the `libusb_get_port_numbers' function. */ #undef HAVE_LIBUSB_GET_PORT_NUMBERS /* Define to 1 if you have the header file. */ #undef HAVE_LIBUSB_H /* Define to 1 if you have the `libusb_has_capability' function. */ #undef HAVE_LIBUSB_HAS_CAPABILITY /* Define to 1 if you have the `libusb_strerror' function. */ #undef HAVE_LIBUSB_STRERROR /* Define to 1 if you have the `localtime_r' function. */ #undef HAVE_LOCALTIME_R /* Define to 1 if you have the header file. */ #undef HAVE_MEMORY_H /* Define to 1 if you have the `newlocale' function. */ #undef HAVE_NEWLOCALE /* Have GNU style pthread_setname_np(pthread_t thread, const char *name) */ #undef HAVE_PTHREAD_SETNAME_NP_GNU /* Have MacOS style pthread_setname_np(const char *name) */ #undef HAVE_PTHREAD_SETNAME_NP_MAC /* Define to 1 if you have the `pthread_set_name_np' function. */ #undef HAVE_PTHREAD_SET_NAME_NP /* Define to 1 if you have the header file. */ #undef HAVE_STDINT_H /* Define to 1 if you have the header file. */ #undef HAVE_STDLIB_H /* Define to 1 if you have the header file. */ #undef HAVE_STRINGS_H /* Define to 1 if you have the header file. */ #undef HAVE_STRING_H /* Define to 1 if you have the `strtod_l' function. 
*/ #undef HAVE_STRTOD_L /* Define to 1 if you have the header file. */ #undef HAVE_SYS_STAT_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_TYPES_H /* Define to 1 if you have the `timegm' function. */ #undef HAVE_TIMEGM /* Define to 1 if you have the header file. */ #undef HAVE_TR1_UNORDERED_MAP /* Define to 1 if you have the `udev_device_get_sysattr_list_entry' function. */ #undef HAVE_UDEV_DEVICE_GET_SYSATTR_LIST_ENTRY /* Define to 1 if you have the `udev_device_get_tags_list_entry' function. */ #undef HAVE_UDEV_DEVICE_GET_TAGS_LIST_ENTRY /* Define to 1 if you have the header file. */ #undef HAVE_UNISTD_H /* Define to 1 if you have the header file. */ #undef HAVE_UNORDERED_MAP /* Define to 1 if you have the `vasprintf' function. */ #undef HAVE_VASPRINTF /* Define to 1 if you have the header file. */ #undef HAVE_XLOCALE_H /* Define to 1 if you have the `_create_locale' function. */ #undef HAVE__CREATE_LOCALE /* Define to 1 if you have the `_strtod_l' function. */ #undef HAVE__STRTOD_L /* Define as const if the declaration of iconv() needs const. */ #undef ICONV_CONST /* Define this with the path to the iconv utility */ #undef ICONV_UTIL_PATH /* libusb header location */ #undef LIBUSB_HEADER /* Define to the address where bug reports for this package should be sent. */ #undef PACKAGE_BUGREPORT /* Define to the full name of this package. */ #undef PACKAGE_NAME /* Define to the full name and version of this package. */ #undef PACKAGE_STRING /* Define to the one symbol short name of this package. */ #undef PACKAGE_TARNAME /* Define to the home page for this package. */ #undef PACKAGE_URL /* Define to the version of this package. */ #undef PACKAGE_VERSION /* Set the default to use for the seedd control socket */ #undef SEEDD_CONTROL_SOCKET /* Define to 1 if you have the ANSI C header files. */ #undef STDC_HEADERS /* Explicitly set the per-thread stack size in kB (if non-zero) */ #undef THREAD_STACK_SIZE /* Enable Windows in 'UNICODE' mode */ #undef UNICODE /* Select the MSW version to be compatible with */ #undef WINVER /* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most significant byte first (like Motorola and SPARC, unlike Intel). */ #if defined AC_APPLE_UNIVERSAL_BUILD # if defined __BIG_ENDIAN__ # define WORDS_BIGENDIAN 1 # endif #else # ifndef WORDS_BIGENDIAN # undef WORDS_BIGENDIAN # endif #endif /* Define to 1 if `lex' declares `yytext' as a `char *' by default, not a `char[]'. */ #undef YYTEXT_POINTER /* Build with libc buffer overflow checks enabled. We need to guard this, because on some platforms the toolchain will already define it as a builtin, and then emit warnings if we redefine it. Ideally, we'd undefine it here and then force our choice of strictness, but we can't do that with autoheader because it sees that as a hook to rewrite. So just let people (yes, we're looking at you Gentoo) reap what they've sown if the toolchain or the environment they use has already defined it. */ #ifndef _FORTIFY_SOURCE # undef _FORTIFY_SOURCE #endif /* Include support for vasprintf et al. */ #undef _GNU_SOURCE /* The MSW NT version to be compatible with */ #undef _WIN32_WINNT #include bit-babbler-0.9/src/0002755000000000000000000000000014136173163011213 5ustar bit-babbler-0.9/src/bbcheck.cpp0000644000000000000000000005671614136173163013315 0ustar // This file is distributed as part of the bit-babbler package. 
// Copyright 2014 - 2018, Ron #include "private_setup.h" #include #include #include #include #include using BitB::USBContext; using BitB::BitBabbler; using BitB::QA::Ent8; using BitB::QA::BitRuns; using BitB::StrToU; using BitB::StrToScaledU; using BitB::StrToScaledUL; using BitB::StrToScaledD; using BitB::Log; using BitB::stringprintf; using std::string; #define CYAN_IF(s) COLOUR_STR_IF(m_options.colour,CYAN,s) class Test : public BitB::RefCounted { //{{{ public: typedef BitB::RefPtr< Test > Handle; typedef std::list< Handle > List; struct Options { //{{{ size_t test_len; unsigned block_size; unsigned bitrate_max; unsigned bitrate_min; bool show_all; bool colour; BitBabbler::Options bboptions; Options() : test_len( 1024 * 1024 ) , block_size( 65536 ) , bitrate_max( 5000000 ) , bitrate_min( 3000000 ) , show_all( false ) , colour( true ) {} }; //}}} struct Result : public BitB::RefCounted { //{{{ enum TestID { //{{{ BITRUN_BIAS, BITRUN_CHISQ, ENT_ENTROPY, ENT_CHISQ, ENT_MEAN, ENT_PI, ENT_CORR, ENT_MINENTROPY, VALUE_MAX }; //}}} struct Rank { //{{{ typedef std::vector< Rank > Vector; enum Order { ASCENDING, // Smallest values are best DESCENDING // Largest values are best }; size_t index; double value; Order order; Rank( size_t i, double v, Order o = ASCENDING ) : index( i ) , value( v ) , order( o ) {} bool operator<( const Rank &r ) const { if( order == ASCENDING ) return value < r.value; return r.value < value; } }; //}}} struct Ranking { //{{{ Rank::Vector results[ VALUE_MAX ]; void AddResult( TestID t, const Rank &r ) { results[t].push_back( r ); } void SortResults() { for( size_t i = 0; i < VALUE_MAX; ++i ) std::stable_sort( results[i].begin(), results[i].end() ); } const char *ResultColour( TestID t, size_t test_index ) const { //{{{ // We're not terribly precious about colour palettes here, but // normal bold yellow is insanely bright and as the 3rd best option // we don't want it screaming for the most attention, so make the // 2 - 4th place options about the same brightness, and darker than // the best one, and make the worst one even darker still, so the // red doesn't stand out too intensely either. 
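                // For example, if six runs were ranked on BITRUN_BIAS, the run
                // whose |1.0 - bias| sorted into results[t][0] is shown in
                // BOLD_GREEN, the next three in MID_GREEN, MID_YELLOW, and
                // MID_ORANGE, the worst (results[t][n-1]) in DARK_RED, and any
                // run in between is left uncoloured.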
size_t n = results[t].size(); if( n > 0 && results[t][0].index == test_index ) return BOLD_GREEN; if( n > 1 && results[t][1].index == test_index ) return MID_GREEN; if( n > 2 && results[t][2].index == test_index ) return MID_YELLOW; if( n > 3 && results[t][3].index == test_index ) return MID_ORANGE; if( n > 4 && results[t][n-1].index == test_index ) return DARK_RED; return ""; } //}}} }; //}}} typedef BitB::RefPtr< Result > Handle; typedef std::vector< Handle > Vector; unsigned bitrate; unsigned enable_mask; Ent8::Data ent8; BitRuns::Result bitruns; Result( unsigned bitrate, unsigned enable_mask, const Ent8::Data &ent8, const BitRuns::Result &bitruns ) : bitrate( bitrate ) , enable_mask( enable_mask ) , ent8( ent8 ) , bitruns( bitruns ) {} void RankResults( size_t i, Ranking &r ) { //{{{ const Ent8::Result &e8 = ent8.result[Ent8::CURRENT]; r.AddResult( BITRUN_BIAS, Rank( i, fabs(1.0 - bitruns.GetBias()) ) ); r.AddResult( BITRUN_CHISQ, Rank( i, bitruns.GetChisq() ) ); r.AddResult( ENT_ENTROPY, Rank( i, e8.entropy, Rank::DESCENDING ) ); r.AddResult( ENT_CHISQ, Rank( i, e8.chisq ) ); r.AddResult( ENT_MEAN, Rank( i, fabs(127.5 - e8.mean) ) ); r.AddResult( ENT_PI, Rank( i, fabs(e8.PiError()) ) ); r.AddResult( ENT_CORR, Rank( i, fabs(e8.corr) ) ); r.AddResult( ENT_MINENTROPY, Rank( i, e8.minentropy, Rank::DESCENDING ) ); } //}}} void Report( size_t n, Ranking &r ) { //{{{ if( enable_mask == 0x0f ) printf("%u Hz\n", bitrate ); else if( BitB::popcount( enable_mask ) == 1 ) printf("%u Hz, generator %u\n", bitrate, BitB::fls(enable_mask) - 1 ); else printf("%u Hz, generator mask 0x%02x\n", bitrate, enable_mask ); double chisqp; double chisq = bitruns.GetChisq( &chisqp ); printf( "Max run of %3zu (expected %3zu), bias %s%.9f%s, χ² %s%.3f%s (p = %f)\n", bitruns.maxrun, bitruns.GetExpectedMax(), r.ResultColour(BITRUN_BIAS, n), bitruns.GetBias(), END_COLOUR, r.ResultColour(BITRUN_CHISQ, n), chisq, END_COLOUR, chisqp ); const Ent8::Result &e8 = ent8.result[Ent8::CURRENT]; printf( "Ent8: Hs %s%f%s, Hm %s%f%s, Mean %s%f%s, Corr %s% .8f%s," " π %s%.8f%s (% .5f), χ² %s%f%s (%.2f)\n", r.ResultColour(ENT_ENTROPY, n), e8.entropy, END_COLOUR, r.ResultColour(ENT_MINENTROPY, n), e8.minentropy, END_COLOUR, r.ResultColour(ENT_MEAN, n), e8.mean, END_COLOUR, r.ResultColour(ENT_CORR, n), e8.corr, END_COLOUR, r.ResultColour(ENT_PI, n), e8.pi, END_COLOUR, e8.PiError(), r.ResultColour(ENT_CHISQ, n), e8.chisq, END_COLOUR, e8.ChisqProb() ); } //}}} void Report() { //{{{ if( enable_mask == 0x0f ) printf("%u Hz\n", bitrate ); else if( BitB::popcount( enable_mask ) == 1 ) printf("%u Hz, generator %u\n", bitrate, BitB::fls(enable_mask) - 1 ); else printf("%u Hz, generator mask 0x%02x\n", bitrate, enable_mask ); double chisqp; double chisq = bitruns.GetChisq( &chisqp ); printf( "Max run of %3zu (expected %3zu), bias %.9f, χ² %.3f (p = %f)\n", bitruns.maxrun, bitruns.GetExpectedMax(), bitruns.GetBias(), chisq, chisqp ); printf( "Ent8: %s\n", ent8.result[Ent8::CURRENT].Report().c_str() ); } //}}} }; //}}} private: USBContext::Device::Handle m_dev; Options m_options; string m_id; pthread_t m_threadid; uint8_t *m_buf; Result::Vector m_results; static unsigned DecrementBitrate( unsigned rate ) { return 30000000 / (30000000 / rate + 1); } void run_test( const BitBabbler::Options &bbo ) { //{{{ static unsigned nchunks[] = { 16, 10, 8, 5, 2 }; BitBabbler b( m_dev, bbo ); unsigned fold = b.GetFolding(); size_t bs = m_options.block_size; size_t len = m_options.test_len * (1u << fold); unsigned sec = unsigned(len * 8 / bbo.bitrate); 
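        // A rough worked example, assuming default options and a folding
        // level of 1: DecrementBitrate() steps the rate ladder down from
        // 5 MHz through 30e6/7 ~= 4.29 MHz, 3.75 MHz, ~3.33 MHz and 3 MHz
        // (each step divides the 30 MHz reference by the next integer), and
        // with the default 1 MiB test_len each pass reads 2 MiB, so at 5 MHz
        // the estimate above works out to (2 * 2^20 * 8) / 5e6, a little
        // over 3 seconds.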
unsigned min = sec / 60; size_t e8short_len = m_options.test_len; for( size_t i = 0; i < sizeof(nchunks) / sizeof(*nchunks); ++i ) { if( m_options.test_len % nchunks[i] == 0 ) { e8short_len = m_options.test_len / nchunks[i]; break; } } if( min ) Log<1>( _("Test %s reading %zu samples at %u Hz (will take ~%u:%02u min)\n"), m_id.c_str(), len, bbo.bitrate, min, sec - 60 * min ); else Log<1>( _("Test %s reading %zu samples at %u Hz (will take ~%u sec)\n"), m_id.c_str(), len, bbo.bitrate, sec ); BitRuns bitruns; Ent8 ent8( e8short_len ); for( size_t n = 0; n < len; ) { size_t rs = std::min( bs, len ); for( size_t r = 0; r < rs; ) r += b.read( m_buf + r, std::min( size_t(65536), rs - r ) ); len -= rs; size_t flen = BitB::FoldBytes( m_buf, rs, fold ); ent8.Analyse( m_buf, flen ); bitruns.AddBits( m_buf, flen ); } bitruns.flush(); if( m_options.show_all ) { // We need to globally mutex this block so that tests which complete // at the same time don't intermingle the output of their results. static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; BitB::ScopedMutex lock( &mutex ); if( bbo.enable_mask == 0x0f ) printf( CYAN_IF("\n%s %u Hz\n"), m_id.c_str(), bbo.bitrate ); else if( BitB::popcount( bbo.enable_mask ) == 1 ) printf( CYAN_IF("\n%s %u Hz, generator %u\n"), m_id.c_str(), bbo.bitrate, BitB::fls(bbo.enable_mask) - 1 ); else printf( CYAN_IF("\n%s %u Hz, generator mask 0x%02x\n"), m_id.c_str(), bbo.bitrate, bbo.enable_mask ); printf( "%s\n", bitruns.GetResult().Report().c_str() ); printf( "\n Ent8 short, %s\n", ent8.ShortTermData().ReportResults().c_str() ); printf( "\n Ent8 long, %s\n", ent8.LongTermData().ReportResults().c_str() ); } m_results.push_back( new Result( bbo.bitrate, bbo.enable_mask, ent8.LongTermData(), bitruns.GetResult() ) ); } //}}} void run_test_thread() { //{{{ BitBabbler::Options bbo = m_options.bboptions; m_buf = new uint8_t[ m_options.block_size ]; for( bbo.bitrate = m_options.bitrate_max; bbo.bitrate >= m_options.bitrate_min; bbo.bitrate = DecrementBitrate( bbo.bitrate ) ) { if( (m_options.bboptions.enable_mask & 0xf) == 0 ) { bbo.enable_mask = 1; for( size_t i = 0; i < 4; ++i ) { run_test( bbo ); bbo.enable_mask <<= 1; } if( (m_options.bboptions.enable_mask & 0x10) == 0 ) continue; bbo.enable_mask = 0x0f; } run_test( bbo ); } } //}}} static void *test_thread( void *p ) { //{{{ Test::Handle h = static_cast( p ); // Drop the 'virtual handle' from the ctor, we have a real one now. h->Unref(); try { h->run_test_thread(); } catch( const abi::__forced_unwind& ) { Log<3>( _("Test %s thread cancelled\n"), h->m_id.c_str() ); throw; } BB_CATCH_STD( 0, stringprintf( _("uncaught exception in Test %s thread"), h->m_id.c_str() ).c_str() ) return NULL; } //}}} void begin_tests() { //{{{ using BitB::SystemError; // Bump the refcount until the thread is started, otherwise we // may lose a race with this Test being released by the caller // before the thread can take its handle from the raw pointer. // Think of it as a virtual Handle passed with pthread_create. // // In practice, this isn't actually a problem in the current code // because the only time the Test might be destroyed before the // thread has run its course is if we're crash diving our way out // after getting an early termination signal, when no unwinding // will be done anyway. But conceptually the problem is real in // this class, so handle it correctly in case future use changes. Ref(); // We don't need to Unref() if this fails, because we'll throw // and it will never have been constructed to be destroyed ... 
// That assumes this method is only ever called from the ctor, // which currently is true. int ret = pthread_create( &m_threadid, BitB::GetDefaultThreadAttr(), test_thread, this ); if( ret ) throw SystemError( ret, _("Test %s failed to create thread"), m_id.c_str() ); } //}}} public: Test( const USBContext::Device::Handle &dev, const Options &options ) : m_dev( dev ) , m_options( options ) , m_id( dev->GetSerial() ) , m_buf( NULL ) { begin_tests(); } ~Test() { delete [] m_buf; } void WaitForCompletion() const { pthread_join( m_threadid, NULL ); Log<1>( _("Test %s completed\n"), m_id.c_str() ); } void ReportResults() const { //{{{ unsigned bitrate = 0; printf( "\n%s:\n", m_id.c_str() ); if( ! m_options.colour ) { // We could just disable colouring in Result::Ranking, but we don't // actually need to do the collation and sorting if we aren't going // to indicate the relative ranking of the results anyway. for( size_t i = 0, n = m_results.size(); i < n; ++i ) { if( bitrate && m_results[i]->bitrate != bitrate ) putchar('\n'); bitrate = m_results[i]->bitrate; m_results[i]->Report(); } return; } Result::Ranking r; for( size_t i = 0, n = m_results.size(); i < n; ++i ) m_results[i]->RankResults( i, r ); r.SortResults(); for( size_t i = 0, n = m_results.size(); i < n; ++i ) { if( bitrate && m_results[i]->bitrate != bitrate ) putchar('\n'); bitrate = m_results[i]->bitrate; m_results[i]->Report( i, r ); } } //}}} }; //}}} static void usage() { printf("Usage: bbcheck [OPTION...]\n"); printf("\n"); printf("Run automated tests on BitBabbler hardware RNG devices\n"); printf("\n"); printf("Options:\n"); printf(" -s, --scan Scan for available devices\n"); printf(" -i, --device-id=id Read from only the selected device(s)\n"); printf(" -r, --bitrate=Hz[:Hz max] Set the bitrate range to scan\n"); printf(" -b, --bytes=n The number of bytes to test\n"); printf(" -B, --block-size=bytes Set the folding block size\n"); printf(" -A, --all-results Show all results, not just the summary\n"); printf(" -v, --verbose Enable verbose output\n"); printf(" --no-colour Don't colourise final results\n"); printf(" -?, --help Show this help message\n"); printf(" --version Print the program version\n"); printf("\n"); printf("Per device options:\n"); printf(" --latency=ms Override the USB latency timer\n"); printf(" -f, --fold=n Set the amount of entropy folding\n"); printf(" --enable-mask=mask Select a subset of the generators\n"); printf(" --limit-max-xfer Limit the transfer chunk size to 16kB\n"); printf("\n"); printf("Report bugs to support@bitbabbler.org\n"); printf("\n"); } int main( int argc, char *argv[] ) { try { unsigned opt_scan = 0; Test::Options opt_testoptions; BitBabbler::Options default_options; BitBabbler::Options::List device_options; enum { LATENCY_OPT, ENABLEMASK_OPT, LIMIT_MAX_XFER, NOCOLOUR_OPT, VERSION_OPT }; struct option long_options[] = { { "scan", no_argument, NULL, 's' }, { "device-id", required_argument, NULL, 'i' }, { "bitrate", required_argument, NULL, 'r' }, { "bytes", required_argument, NULL, 'b' }, { "block-size", required_argument, NULL, 'B' }, { "latency", required_argument, NULL, LATENCY_OPT }, { "fold", required_argument, NULL, 'f' }, { "enable-mask", required_argument, NULL, ENABLEMASK_OPT }, { "limit-max-xfer", no_argument, NULL, LIMIT_MAX_XFER }, { "no-colour", no_argument, NULL, NOCOLOUR_OPT }, { "all-results", no_argument, NULL, 'A' }, { "verbose", no_argument, NULL, 'v' }, { "help", no_argument, NULL, '?' 
}, { "version", no_argument, NULL, VERSION_OPT }, { 0, 0, 0, 0 } }; int opt_index = 0; for(;;) { //{{{ int c = getopt_long( argc, argv, ":si:r:b:B:f:Av?", long_options, &opt_index ); if( c == -1 ) break; switch(c) { case 's': opt_scan = 1; break; case 'i': { BitBabbler::Options bbo = default_options; try { bbo.id = optarg; } catch( const std::exception &e ) { fprintf( stderr, "%s: error, %s\n", argv[0], e.what() ); return EXIT_FAILURE; } device_options.push_back( bbo ); break; } case 'r': { string r( optarg ); size_t n = r.find(':'); if( n == string::npos ) { opt_testoptions.bitrate_min = opt_testoptions.bitrate_max = BitBabbler::RealBitrate( unsigned(StrToScaledD(optarg)) ); } else { opt_testoptions.bitrate_min = BitBabbler::RealBitrate( unsigned(StrToScaledD(r.substr(0,n))) ); opt_testoptions.bitrate_max = BitBabbler::RealBitrate( unsigned(StrToScaledD(r.substr(n+1))) ); } break; } case 'b': opt_testoptions.test_len = StrToScaledUL( optarg, 1024 ); break; case 'B': opt_testoptions.block_size = StrToScaledU( optarg, 1024 ); break; case LATENCY_OPT: { unsigned latency = StrToU( optarg, 10 ); if( device_options.empty() ) default_options.latency = latency; else device_options.back().latency = latency; break; } case 'f': { unsigned fold = StrToU( optarg, 10 ); if( device_options.empty() ) default_options.fold = fold; else device_options.back().fold = fold; break; } case ENABLEMASK_OPT: { unsigned mask = StrToU( optarg ); if( device_options.empty() ) default_options.enable_mask = mask; else device_options.back().enable_mask = mask; break; } case LIMIT_MAX_XFER: if( device_options.empty() ) default_options.chunksize = 16384; else device_options.back().chunksize = 16384; break; case NOCOLOUR_OPT: opt_testoptions.colour = false; break; case 'A': opt_testoptions.show_all = true; break; case 'v': ++BitB::opt_verbose; break; case '?': if( optopt != '?' && optopt != 0 ) { fprintf(stderr, "%s: invalid option -- '%c', try --help\n", argv[0], optopt); return EXIT_FAILURE; } usage(); return EXIT_SUCCESS; case ':': fprintf(stderr, "%s: missing argument for '%s', try --help\n", argv[0], argv[optind - 1] ); return EXIT_FAILURE; case VERSION_OPT: printf("bbcheck " PACKAGE_VERSION "\n"); return EXIT_SUCCESS; } } //}}} BitB::Devices d; if( opt_scan ) { d.ListDevices(); return EXIT_SUCCESS; } else if( d.GetNumDevices() == 0 ) { fprintf( stderr, _("bbcheck: No devices found, aborting.\n") ); return EXIT_FAILURE; } Test::List tests; if( device_options.empty() ) { // We don't want devices that were hotplugged after this was started to // be considered here. This isn't a daemon, it just runs a one-shot set // of tests, so start them up for just the currently available set. 
USBContext::Device::List devices = d.GetDevices(); opt_testoptions.bboptions = default_options; for( USBContext::Device::List::iterator i = devices.begin(), e = devices.end(); i != e; ++i ) tests.push_back( new Test( *i, opt_testoptions ) ); } else { for( BitBabbler::Options::List::const_iterator i = device_options.begin(), e = device_options.end(); i != e; ++i ) { opt_testoptions.bboptions = *i; tests.push_back( new Test( d.GetDevice( i->id ), opt_testoptions ) ); } } for( Test::List::iterator i = tests.begin(), e = tests.end(); i != e; ++i ) (*i)->WaitForCompletion(); for( Test::List::iterator i = tests.begin(), e = tests.end(); i != e; ++i ) (*i)->ReportResults(); return EXIT_SUCCESS; } BB_CATCH_ALL( 0, _("bbcheck fatal exception") ) return EXIT_FAILURE; } // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/src/bbctl.cpp0000644000000000000000000004415214136173163013011 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2014 - 2018, Ron #include "private_setup.h" #include #include #include #include #include using BitB::Json; using BitB::ClientSock; using BitB::QA::Ent8; using BitB::QA::Ent16; using BitB::StrToU; using BitB::StrToScaledU; using BitB::StrToScaledUL; using BitB::Error; using BitB::Log; using BitB::stringprintf; using std::string; static void usage() { printf("Usage: bbctl [OPTION...]\n"); printf("\n"); printf("Query and control tool for BitBabbler hardware RNG devices\n"); printf("\n"); printf("Options:\n"); printf(" -s, --scan Scan for active devices\n"); printf(" -i, --device-id=id Act on only a single device\n"); printf(" -b, --bin-freq Report the 8-bit symbols sorted by frequency\n"); printf(" -B, --bin-freq16 Report the 16-bit symbols sorted by frequency\n"); printf(" --bin-count Report the 8-bit symbols in symbol order\n"); printf(" --bin-count16 Report the 16-bit symbols in symbol order\n"); printf(" --first=n Show only the first n bins\n"); printf(" --last=n Show only the last n bins\n"); printf(" -r, --bit-runs Report on runs of consecutive bits\n"); printf(" -S, --stats Report general QA statistics\n"); printf(" -c, --control-socket=path The service socket to query\n"); printf(" -V, --log-verbosity=n Change the logging verbosity\n"); printf(" --waitfor=dev:n:r:max Wait for a device to pass some number of bytes\n"); printf(" -v, --verbose Enable verbose output\n"); printf(" -?, --help Show this help message\n"); printf(" --version Print the program version\n"); printf("\n"); printf("Report bugs to support@bitbabbler.org\n"); printf("\n"); } struct WaitFor { //{{{ typedef std::list< WaitFor > List; std::string deviceid; size_t bytes; size_t retry_ms; size_t timeout_ms; WaitFor( const std::string &arg ) : retry_ms( 1000 ) , timeout_ms( 0 ) { //{{{ // Parse the options from a string of the form: // device:bytes:retry_ms:timeout_ms // Where device and bytes are mandatory. 
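        // For example (with a made-up device serial), an argument such as
        //
        //     --waitfor=ABC123:1M:500:30000
        //
        // waits until device ABC123 has passed 1 MiB of QA-checked bytes,
        // polling every 500 ms and giving up after 30 seconds, while a bare
        //
        //     --waitfor=ABC123:64k
        //
        // polls at the default 1000 ms interval with no timeout at all
        // (assuming the usual k/M scaling suffixes for the byte count).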
size_t n = arg.find( ':' ); if( n == string::npos ) throw Error( _("No byte count given in --waitfor=%s"), arg.c_str() ); deviceid = arg.substr( 0, n ); ++n; try { size_t n2 = arg.find( ':', n ); if( n2 == string::npos ) { bytes = StrToScaledUL( arg.substr(n), 1024 ); return; } bytes = StrToScaledUL( arg.substr(n, n2 - n), 1024 ); n = n2 + 1; n2 = arg.find( ':', n ); if( n2 == string::npos ) { retry_ms = StrToScaledUL( arg.substr(n) ); goto done; } retry_ms = StrToScaledUL( arg.substr(n, n2 - n) ); n = n2 + 1; timeout_ms = StrToScaledUL( arg.substr(n) ); done: if( retry_ms < 1 ) throw Error( _("Retry time must be >= 1ms in --waitfor=%s"), arg.c_str() ); } catch( const std::exception &e ) { throw Error( _("Invalid --waitfor argument '%s': %s"), arg.c_str(), e.what() ); } } //}}} }; //}}} int main( int argc, char *argv[] ) { try { unsigned opt_scan = 0; unsigned opt_bin_count = 0; unsigned opt_bin_freq = 0; unsigned opt_bit_runs = 0; unsigned opt_stats = 0; unsigned opt_first = 65536; unsigned opt_last = 65536; unsigned opt_log_level = unsigned(-1); string opt_deviceid; string opt_controlsock = SEEDD_CONTROL_SOCKET; WaitFor::List opt_wait; enum { BINCOUNT_OPT, BINCOUNT16_OPT, FIRST_OPT, LAST_OPT, WAITFOR_OPT, VERSION_OPT }; struct option long_options[] = { { "scan", no_argument, NULL, 's' }, { "device-id", required_argument, NULL, 'i' }, { "bin-freq", no_argument, NULL, 'b' }, { "bin-freq16", no_argument, NULL, 'B' }, { "bin-count", no_argument, NULL, BINCOUNT_OPT }, { "bin-count16", no_argument, NULL, BINCOUNT16_OPT }, { "first", required_argument, NULL, FIRST_OPT }, { "last", required_argument, NULL, LAST_OPT }, { "bit-runs", no_argument, NULL, 'r' }, { "stats", no_argument, NULL, 'S' }, { "control-socket", required_argument, NULL, 'c' }, { "log-verbosity", required_argument, NULL, 'V' }, { "waitfor", required_argument, NULL, WAITFOR_OPT }, { "verbose", no_argument, NULL, 'v' }, { "help", no_argument, NULL, '?' }, { "version", no_argument, NULL, VERSION_OPT }, { 0, 0, 0, 0 } }; int opt_index = 0; for(;;) { //{{{ int c = getopt_long( argc, argv, ":si:c:bBrSV:v?", long_options, &opt_index ); if( c == -1 ) break; switch(c) { case 's': opt_scan = 1; break; case 'i': opt_deviceid = optarg; break; case 'b': opt_bin_freq = 1; break; case 'B': opt_bin_freq = 16; break; case BINCOUNT_OPT: opt_bin_freq = 1; opt_bin_count = 1; break; case BINCOUNT16_OPT: opt_bin_freq = 16; opt_bin_count = 1; break; case FIRST_OPT: opt_first = StrToScaledU( optarg ); if( opt_last == 65536 ) opt_last = 0; break; case LAST_OPT: opt_last = StrToScaledU( optarg ); if( opt_first == 65536 ) opt_first = 0; break; case 'r': opt_bit_runs = 1; break; case 'S': opt_stats = 1; break; case 'c': opt_controlsock = optarg; break; case 'V': opt_log_level = StrToU( optarg, 10 ); break; case WAITFOR_OPT: opt_wait.push_back( WaitFor(optarg) ); break; case 'v': ++BitB::opt_verbose; break; case '?': if( optopt != '?' 
&& optopt != 0 ) { fprintf(stderr, "%s: invalid option -- '%c', try --help\n", argv[0], optopt); return EXIT_FAILURE; } usage(); return EXIT_SUCCESS; case ':': fprintf(stderr, "%s: missing argument for '%s', try --help\n", argv[0], argv[optind - 1] ); return EXIT_FAILURE; case VERSION_OPT: printf("bbctl " PACKAGE_VERSION "\n"); return EXIT_SUCCESS; } } //}}} ClientSock client( opt_controlsock ); if( opt_log_level != unsigned(-1) ) { //{{{ client.SendRequest( stringprintf( "[\"SetLogVerbosity\",0,%u]", opt_log_level ) ); Json::Handle json = client.Read(); Log<4>("read reply: %s\n", json->JSONStr().c_str() ); if( json[0]->String() == "SetLogVerbosity" ) { printf( "Log verbosity is now %u\n", unsigned(json[2]) ); } else { Log<0>( "unrecognised reply\n" ); } } //}}} if( opt_scan ) { //{{{ client.SendRequest( "\"GetIDs\"" ); Json::Handle json = client.Read(); Log<4>("read reply: %s\n", json->JSONStr().c_str() ); if( json[0]->String() == "GetIDs" ) { Json::Data::Handle ids = json[2]; size_t n = ids->GetArraySize(); printf( P_("Have %zu active device:\n", "Have %zu active devices:\n", n), n ); for( size_t i = 0; i < n; ++i ) printf( _(" Device ID: %s\n"), ids[i]->String().c_str() ); } else { Log<0>( "unrecognised reply\n" ); } } //}}} while( ! opt_wait.empty() ) { //{{{ const WaitFor &w = opt_wait.front(); size_t elapsed; if( w.timeout_ms ) Log<1>( _("Waiting up to %zu ms for %zu good bytes from %s\n"), w.timeout_ms, w.bytes, w.deviceid.c_str() ); else Log<1>( _("Waiting for %zu good bytes from %s\n"), w.bytes, w.deviceid.c_str() ); for( elapsed = 0; w.timeout_ms == 0 || elapsed < w.timeout_ms; elapsed += w.retry_ms ) { client.SendRequest( "[\"ReportStats\",1,\"" + w.deviceid + "\"]" ); Json::Handle json = client.Read(); Log<4>("read reply: %s\n", json->JSONStr().c_str() ); if( json[0]->String() == "ReportStats" ) { Json::Data::Handle stats = json[2]->Get( w.deviceid ); if( ! stats ) throw Error( _("No statistics available for device '%s'"), w.deviceid.c_str() ); unsigned long long passed = stats["QA"]["BytesPassed"]-> As(); if( passed >= w.bytes ) { Log<1>( _("Have %llu good bytes from %s in %zums\n"), passed, w.deviceid.c_str(), elapsed ); goto done; } Log<3>( _("Have %llu good bytes from %s in %zums (waiting for %zu)\n"), passed, w.deviceid.c_str(), elapsed, w.bytes ); } else { // Possibly we should throw here too, but since this should // never happen, assume it's a glitch and just try again. Log<0>( "Unrecognised reply to ReportStats request\n" ); } usleep( useconds_t( w.retry_ms * 1000 ) ); } throw Error( _("Timeout after %zums waiting for %zu bytes from %s\n"), elapsed, w.bytes, w.deviceid.c_str() ); done: opt_wait.pop_front(); } //}}} if( opt_bin_freq ) { //{{{ if( opt_deviceid.empty() ) client.SendRequest( "\"GetRawData\"" ); else client.SendRequest( "[\"GetRawData\",1,\"" + opt_deviceid + "\"]" ); Json::Handle json = client.Read(); Log<4>("read reply: %s\n", json->JSONStr().c_str() ); if( json[0]->String() == "GetRawData" ) { Json::Data::Handle data = json[2]; Json::MemberList sources; data->GetMembers( sources ); for( Json::MemberList::iterator si = sources.begin(), se = sources.end(); si != se; ++si ) { Json::Data::Handle ent8 = data[*si]->Get("Ent8"); Json::Data::Handle ent16 = data[*si]->Get("Ent16"); if( ! 
ent8 ) { printf( "\nsource: %s has no 8-bit data (yet)\n", si->c_str() ); } else { Ent8::Data e8_short( ent8["Short"] ); Ent8::Data e8_long( ent8["Long"] ); if( opt_bin_count ) { printf( "\nsource: %s\n%s\n", si->c_str(), e8_short.ReportBins( opt_first, opt_last ).c_str() ); printf( "\nsource: %s\n%s\n", si->c_str(), e8_long.ReportBins( opt_first, opt_last ).c_str() ); } else { printf( "\nsource: %s\n%s\n", si->c_str(), e8_short.ReportBinsByFreq( opt_first, opt_last ).c_str() ); printf( "\nsource: %s\n%s\n", si->c_str(), e8_long.ReportBinsByFreq( opt_first, opt_last ).c_str() ); } } if( opt_bin_freq == 16 ) { if( ! ent16 ) { printf( "\nsource: %s has no 16-bit data (yet)\n", si->c_str() ); } else { Ent16::Data e16_short( ent16["Short"] ); Ent16::Data e16_long( ent16["Long"] ); if( opt_bin_count ) { printf( "\nsource: %s\n%s\n", si->c_str(), e16_short.ReportBins( opt_first, opt_last ).c_str() ); printf( "\nsource: %s\n%s\n", si->c_str(), e16_long.ReportBins( opt_first, opt_last ).c_str() ); } else { printf( "\nsource: %s\n%s\n", si->c_str(), e16_short.ReportBinsByFreq( opt_first, opt_last ).c_str() ); printf( "\nsource: %s\n%s\n", si->c_str(), e16_long.ReportBinsByFreq( opt_first, opt_last ).c_str() ); } } } } } else { Log<0>( "unrecognised reply\n" ); } } //}}} if( opt_bit_runs ) { //{{{ using BitB::QA::BitRuns; if( opt_deviceid.empty() ) client.SendRequest( "\"ReportStats\"" ); else client.SendRequest( "[\"ReportStats\",1,\"" + opt_deviceid + "\"]" ); Json::Handle json = client.Read(); Log<4>("read reply: %s\n", json->JSONStr().c_str() ); if( json[0]->String() == "ReportStats" ) { Json::Data::Handle stats = json[2]; Json::MemberList sources; stats->GetMembers( sources ); for( Json::MemberList::iterator si = sources.begin(), se = sources.end(); si != se; ++si ) { BitRuns::Result bitruns( stats[*si]["BitRuns"] ); printf( "\nsource: %s\n%s\n", si->c_str(), bitruns.Report().c_str() ); } } else { Log<0>( "unrecognised reply\n" ); } } //}}} if( opt_stats ) { //{{{ if( opt_deviceid.empty() ) client.SendRequest( "\"ReportStats\"" ); else client.SendRequest( "[\"ReportStats\",1,\"" + opt_deviceid + "\"]" ); Json::Handle json = client.Read(); Log<4>("read reply: %s\n", json->JSONStr().c_str() ); if( json[0]->String() == "ReportStats" ) { Json::Data::Handle stats = json[2]; Json::MemberList sources; stats->GetMembers( sources ); for( Json::MemberList::iterator si = sources.begin(), se = sources.end(); si != se; ++si ) { unsigned long long analysed = stats[*si]["QA"]["BytesAnalysed"]-> As(); unsigned long long passed = stats[*si]["QA"]["BytesPassed"]-> As(); BitB::QA::FIPS fips( stats[*si]["FIPS"] ); printf( "\nsource: %s\n", si->c_str() ); printf( "Octets analysed %llu, passed %llu, (not passed %llu)\n", analysed, passed, analysed - passed ); printf( "FIPS %s\n", fips.ReportFailRates().c_str() ); printf( "FIPS %s\n", fips.ReportPassRuns().c_str() ); Json::Data::Handle ent8 = stats[*si]->Get("Ent8"); Json::Data::Handle ent16 = stats[*si]->Get("Ent16"); if( ! ent8 ) { printf( "Ent8: no results (yet)\n" ); } else { Ent8::Data e8_short( Ent8::Results_Only, ent8["Short"] ); Ent8::Data e8_long( Ent8::Results_Only, ent8["Long"] ); printf( "Ent8 short %s\n", e8_short.ReportResults().c_str() ); printf( "Ent8 long %s\n", e8_long.ReportResults().c_str() ); } if( ! 
ent16 ) { printf( "Ent16: no results (yet)\n" ); } else { Ent16::Data e16_short( Ent16::Results_Only, ent16["Short"] ); Ent16::Data e16_long( Ent16::Results_Only, ent16["Long"] ); printf( "Ent16 short %s\n", e16_short.ReportResults().c_str() ); printf( "Ent16 long %s\n", e16_long.ReportResults().c_str() ); } } } else { Log<0>( "unrecognised reply\n" ); } } //}}} return EXIT_SUCCESS; } BB_CATCH_ALL( 0, _("bbctl fatal exception") ) return EXIT_FAILURE; } // vi:sts=4:sw=4:et:foldmethod=marker bit-babbler-0.9/src/seedd.cpp0000644000000000000000000011557314136173163013015 0ustar // This file is distributed as part of the bit-babbler package. // Copyright 2012 - 2018, Ron #ifndef _REENTRANT #error "seedd requires pthread support" #endif #include "private_setup.h" #include #include #include #include #include #include #include #include using BitB::BitBabbler; using BitB::Pool; using BitB::SocketSource; using BitB::ControlSock; using BitB::CreateControlSocket; using BitB::SecretSink; using BitB::StrToU; using BitB::StrToScaledU; using BitB::StrToScaledUL; using BitB::StrToScaledD; using BitB::afterfirst; using BitB::beforefirst; using BitB::stringprintf; using BitB::Error; using BitB::SystemError; using BitB::Log; static void usage() { printf("Usage: seedd [OPTION...]\n"); printf("\n"); printf("Read entropy from BitBabbler hardware RNG devices\n"); printf("\n"); printf("Options:\n"); printf(" -s, --scan Scan for available devices\n"); printf(" --shell-mr Output a machine readable list of devices\n"); printf(" -C, --config=file Read configuration options from a file\n"); printf(" -i, --device-id=id Read from only the selected device(s)\n"); printf(" -b, --bytes=n Send n bytes to stdout\n"); printf(" -o, --stdout Send entropy to stdout\n"); printf(" -d, --daemon Run as a background daemon\n"); printf(" -k, --kernel Feed entropy to the kernel\n"); printf(" -u, --udp-out=host:port Provide a UDP socket for entropy output\n"); printf(" -c, --control-socket=path Where to create the control socket\n"); printf(" --socket-group=grp Grant group access to the control socket\n"); printf(" --ip-freebind Allow sockets to be bound to dynamic interfaces\n"); printf(" -P, --pool-size=n Size of the entropy pool\n"); printf(" --kernel-device=path Where to feed entropy to the OS kernel\n"); printf(" --kernel-refill=sec Max time in seconds before OS pool refresh\n"); printf(" -G, --group-size=g:n Size of a single pool group\n"); printf(" --watch=path:ms:bs:n Monitor an external device\n"); printf(" --gen-conf Output a config file using the options passed\n"); printf(" -v, --verbose Enable verbose output\n"); printf(" -?, --help Show this help message\n"); printf(" --version Print the program version\n"); printf("\n"); printf("Per device options:\n"); printf(" -r, --bitrate=Hz Set the bitrate (in bits per second)\n"); printf(" --latency=ms Override the USB latency timer\n"); printf(" -f, --fold=n Set the amount of entropy folding\n"); printf(" -g, --group=n The pool group to add the device to\n"); printf(" --enable-mask=mask Select a subset of the generators\n"); printf(" --idle-sleep=init:max Tune the rate of pool refresh when idle\n"); printf(" --suspend-after=ms Set the threshold for USB autosuspend\n"); printf(" --low-power Convenience preset for idle and suspend\n"); printf(" --limit-max-xfer Limit the transfer chunk size to 16kB\n"); printf(" --no-qa Don't drop blocks that fail QA checking\n"); printf("\n"); printf("Report bugs to support@bitbabbler.org\n"); printf("\n"); } #if EM_PLATFORM_POSIX static void 
WriteCompletion( void *p ) { pthread_t *t = static_cast( p ); pthread_kill( *t, SIGRTMIN ); } #else static pthread_mutex_t wait_mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t wait_cond = PTHREAD_COND_INITIALIZER; static int done_waiting = 0; static void WriteCompletion( void *p ) { (void)p; BitB::ScopedMutex lock( &wait_mutex ); done_waiting = 1; pthread_cond_broadcast( &wait_cond ); } #endif // Configuration options, imported from file(s) and/or the command line. class Config : public BitB::IniData { //{{{ private: // The last --device-id passed on the command line, which any // subsequent per-device options there should be applied to. std::string m_curdev; Validator::Handle m_validator; // Option validator for unsigned number values in any base. static void UnsignedValue( const std::string &option, const std::string &value ) { //{{{ try { StrToU( value ); } catch( const std::exception &e ) { throw Error( _("Option '%s' expected integer: %s"), option.c_str(), e.what() ); } } //}}} // Option validator for base-10 unsigned integer values. static void UnsignedBase10Value( const std::string &option, const std::string &value ) { //{{{ try { StrToU( value, 10 ); } catch( const std::exception &e ) { throw Error( _("Option '%s' expected decimal integer: %s"), option.c_str(), e.what() ); } } //}}} // Option validator for base-10 unsigned integer values, optionally scaled by a suffix. static void ScaledUnsignedValue( const std::string &option, const std::string &value ) { //{{{ try { StrToScaledUL( value ); } catch( const std::exception &e ) { throw Error( _("Option '%s' expected decimal integer: %s"), option.c_str(), e.what() ); } } //}}} // Option validator for decimal fraction values, optionally scaled by a suffix. static void ScaledFloatValue( const std::string &option, const std::string &value ) { //{{{ try { StrToScaledD( value ); } catch( const std::exception &e ) { throw Error( _("Option '%s' expected decimal value: %s"), option.c_str(), e.what() ); } } //}}} // Validate Sections and Options. // We don't exhaustively validate all the option values here, mostly we just // want to catch invalid section and option names, but there's no reason not // to do basic sanity checking of the easy ones at this stage too. void validate() { //{{{ if( ! m_validator ) { // We may have multiple (or no) config files to validate, // so create this once the first time that it's needed. 
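            // As a concrete illustration of what this will catch: setting
            // the [Service] option 'verbose' to something like "lots" fails
            // UnsignedValue(), and an option name that isn't known for its
            // section (say 'sise' instead of 'size' in [Pool]) is reported
            // as an error instead of being silently ignored.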
m_validator = new Validator; // [Service] section options Validator::OptionList::Handle service_opts = new Validator::OptionList; service_opts->AddTest( "daemon", Validator::OptionWithoutValue ) ->AddTest( "kernel", Validator::OptionWithoutValue ) ->AddTest( "udp-out", Validator::OptionWithValue ) ->AddTest( "control-socket", Validator::OptionWithValue ) ->AddTest( "socket-group", Validator::OptionWithValue ) ->AddTest( "ip-freebind", Validator::OptionWithoutValue ) ->AddTest( "verbose", UnsignedValue ); m_validator->Section( "Service", Validator::SectionNameEquals, service_opts ); // [Pool] section options Validator::OptionList::Handle pool_opts = new Validator::OptionList; pool_opts->AddTest( "size", ScaledUnsignedValue ) ->AddTest( "kernel-device", Validator::OptionWithValue ) ->AddTest( "kernel-refill", UnsignedBase10Value ); m_validator->Section( "Pool", Validator::SectionNameEquals, pool_opts ); // [PoolGroup:] section options Validator::OptionList::Handle poolgroup_opts = new Validator::OptionList; poolgroup_opts->AddTest( "size", ScaledUnsignedValue ); m_validator->Section( "PoolGroup:", Validator::SectionNamePrefix, poolgroup_opts ); // [Devices] and [Device:] section options Validator::OptionList::Handle device_opts = new Validator::OptionList; device_opts->AddTest( "bitrate", ScaledFloatValue ) ->AddTest( "latency", UnsignedBase10Value ) ->AddTest( "fold", UnsignedBase10Value ) ->AddTest( "group", UnsignedBase10Value ) ->AddTest( "enable-mask", UnsignedValue ) ->AddTest( "idle-sleep", Validator::OptionWithValue ) ->AddTest( "suspend-after", ScaledUnsignedValue ) ->AddTest( "low-power", Validator::OptionWithoutValue ) ->AddTest( "limit-max-xfer", Validator::OptionWithoutValue ) ->AddTest( "no-qa", Validator::OptionWithoutValue ); m_validator->Section( "Devices", Validator::SectionNameEquals, device_opts ); m_validator->Section( "Device:", Validator::SectionNamePrefix, device_opts ); // [Watch:] section options Validator::OptionList::Handle watch_opts = new Validator::OptionList; watch_opts->AddTest( "path", Validator::OptionWithValue ) ->AddTest( "delay", ScaledUnsignedValue ) ->AddTest( "block-size", ScaledUnsignedValue ) ->AddTest( "max-bytes", ScaledUnsignedValue ); m_validator->Section( "Watch:", Validator::SectionNamePrefix, watch_opts ); } m_validator->Validate( *this ); } //}}} // Implementation detail to handle the 'low-power' option, which is set as // a (per)Device option, implicitly setting the global Pool kernel-refill // time too. We need to check this when exporting Pool options, even if // there was no explict [Pool] section otherwise defined. void check_pool_low_power_option( Pool::Options &p ) const { //{{{ if( HasOption("Devices", "low-power") ) { p.kernel_refill_time = 3600; } else { const Sections &s = GetSections("Device:"); for( Sections::const_iterator i = s.begin(), e = s.end(); i != e; ++i ) { if( i->second->HasOption("low-power") ) { p.kernel_refill_time = 3600; break; } } } } //}}} // Implementation detail for extracting the per-device options from either // the global [Devices] section or an individual [Device:] definition (with // the global [Devices] options used as defaults for it unless overridden). BitBabbler::Options get_device_options( const std::string §ion, const std::string &device_id = std::string(), const BitBabbler::Options &defaults = BitBabbler::Options() ) const { //{{{ BitBabbler::Options bbo = defaults; Section::Handle s = GetSection( section ); std::string opt; if( ! 
device_id.empty() ) bbo.id = device_id; try { opt = "bitrate"; if( s->HasOption( opt ) ) bbo.bitrate = unsigned(StrToScaledD( s->GetOption(opt) )); opt = "latency"; if( s->HasOption( opt ) ) bbo.latency = StrToU( s->GetOption(opt), 10 ); opt = "fold"; if( s->HasOption( opt ) ) bbo.fold = StrToU( s->GetOption(opt), 10 ); opt = "group"; if( s->HasOption( opt ) ) bbo.group = StrToU( s->GetOption(opt), 10 ); opt = "enable-mask"; if( s->HasOption( opt ) ) bbo.enable_mask = StrToU( s->GetOption(opt) ); opt = "low-power"; if( s->HasOption( opt ) ) { bbo.SetIdleSleep( "100:0" ); bbo.suspend_after = 10000; } opt = "suspend-after"; if( s->HasOption( opt ) ) bbo.suspend_after = StrToScaledU( s->GetOption(opt) ); opt = "no-qa"; if( s->HasOption( opt ) ) bbo.no_qa = true; opt = "limit-max-xfer"; if( s->HasOption( opt ) ) bbo.chunksize = 16384; opt = "idle-sleep"; if( s->HasOption( opt ) ) bbo.SetIdleSleep( s->GetOption(opt) ); } catch( const std::exception &e ) { throw Error( _("Failed to apply [%s] option '%s': %s"), section.c_str(), opt.c_str(), e.what() ); } return bbo; } //}}} public: // We only need a trivial default constructor at present. Config() {} // Update the current state with (additional) options from an INI file. // As with command line options, where some option setting is duplicated, // the last one applied will override any seen previously. void ImportFile( const char *path ) { //{{{ char buf[65536]; std::string data; size_t n; FILE *f = fopen( path, "r" ); if( ! f ) throw SystemError( _("Failed to open config file '%s'"), path ); while(( n = fread( buf, 1, sizeof(buf), f ) )) data.append( buf, n ); fclose( f ); try { UpdateWith( data ); validate(); } catch( const std::exception &e ) { throw Error( _("Failed to import config from '%s': %s"), path, e.what() ); } } //}}} // Export entropy Pool configuration options. Pool::Options GetPoolOptions() const { //{{{ Pool::Options p; std::string opt; try { if( HasSection("Pool") ) { Section::Handle s = GetSection("Pool"); opt = "size"; if( s->HasOption( opt ) ) p.pool_size = StrToScaledUL( s->GetOption(opt), 1024 ); opt = "kernel-device"; if( s->HasOption( opt ) ) p.kernel_device = s->GetOption(opt); // This one is set implicitly to 3600 if any device uses the // low-power option, unless it is explicitly set directly. opt = "kernel-refill"; if( s->HasOption( opt ) ) p.kernel_refill_time = StrToU( s->GetOption(opt), 10 ); else check_pool_low_power_option( p ); } else { opt = "kernel-refill (low-power)"; check_pool_low_power_option( p ); } } catch( const std::exception &e ) { throw Error( _("Failed to apply [Pool] option '%s': %s"), opt.c_str(), e.what() ); } return p; } //}}} // Export a list of defined entropy Pool groups. Pool::Group::Options::List GetPoolGroupOptions() const { //{{{ const Sections &s = GetSections("PoolGroup:"); Pool::Group::Options::List g; for( Sections::const_iterator i = s.begin(), e = s.end(); i != e; ++i ) { std::string opt = i->first + ':' + GetOption(i->second, "size"); g.push_back( Pool::Group::Options( opt.c_str() ) ); } return g; } //}}} // Add a [Device:] definition for a --device-id passed on the command line. // And remember the last device added that way so that any subsequent // per-device options on the command line will be applied to it too. void AddDevice( const char *id ) { //{{{ m_curdev = stringprintf( "Device:%s", id ); if( ! HasSection( m_curdev ) ) AddSection( m_curdev ); } //}}} // Set (or override) a per-device option from the command line. 
// If no --device-id has been passed yet, the option will be set in the // global [Devices] section, otherwise it will be set for the specific // [Device:] which was last requested. void SetDeviceOption( const std::string &option, const std::string &value = std::string() ) { AddOrUpdateOption( m_curdev.empty() ? "Devices" : m_curdev, option, value ); } // Export the default [Devices] options to use for any devices which don't // have an explicit [Device:] configuration of their own. BitBabbler::Options GetDefaultDeviceOptions() const { //{{{ if( HasSection("Devices") ) return get_device_options("Devices"); return BitBabbler::Options(); } //}}} // Export a list of the individual [Device:] configuration options for each // device that has one configured for it. BitBabbler::Options::List GetDeviceOptions() const { //{{{ BitBabbler::Options::List bbol; BitBabbler::Options default_options = GetDefaultDeviceOptions(); const Sections &s = GetSections("Device:"); for( Sections::const_iterator i = s.begin(), e = s.end(); i != e; ++i ) { bbol.push_back( get_device_options( i->second->GetName(), i->first, default_options ) ); } return bbol; } //}}} // Add a new [Watch:] definition from options passed on the command line. void AddWatch( const std::string &arg ) { //{{{ using std::string; const Sections &s = GetSections("Watch:"); unsigned next_watch = 0; // If there are numbered Watch sections, find the current largest number. // If someone really uses a number larger than will fit in unsigned int, // and mixes a config file with command line watches, they'll get what // it is that they did to themselves. But unless they really have > 4G // watches set, we will still probably find a safe number to use here // even with some truncated value(s) in the mix. // // The alternative would be to just iterate next_watch from 0 until we // find the first value which isn't a collision, but this way is probably // nicer since it orders all command line watches after any defined with // numeric identifiers in the config file. for( Sections::const_iterator i = s.begin(), e = s.end(); i != e; ++i ) { try { unsigned isnum = StrToU( i->first ); if( isnum >= next_watch ) next_watch = isnum + 1; } catch( const std::exception& ) { // It's not an error for Watch identifiers to not be a number, // we just don't take those into account when generating one // for a watch specified on the command line. //Log<0>( "AddWatch: '%s' is not a number\n", i->first.c_str() ); } } //Log<0>( "Next watch is %u\n", n ); // Parse the options struct from a string of the form: // path:delay:block_size:total_bytes // where everything except the path portion is optional. // // This is similar to what is done in SecretSink::Options::ParseOptArg() // except we don't normalise the numeric values here, we just keep them // as the literal strings which were passed on the command line for now. // They'll get converted to numeric types when actually used. 
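        // For example (the device path is only illustrative):
        //
        //     --watch=/dev/hwrng:100:4k:1M
        //
        // becomes a [Watch:n] section with path /dev/hwrng, a delay of
        // 100 ms between blocks, a 4 kiB block-size and a 1 MiB max-bytes
        // limit, while a bare --watch=/dev/hwrng records only the path and
        // leaves the rest at their defaults.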
Section::Handle sect = AddSection( stringprintf("Watch:%u", next_watch) ); size_t n = arg.find(':'); size_t n2; if( n == string::npos ) { AddOption( sect, "path", arg ); return; } AddOption( sect, "path", arg.substr(0, n) ); ++n; n2 = arg.find( ':', n ); if( n2 == string::npos ) { AddOption( sect, "delay", arg.substr(n) ); return; } AddOption( sect, "delay", arg.substr(n, n2 - n) ); n = n2 + 1; n2 = arg.find( ':', n ); if( n2 == string::npos ) { AddOption( sect, "block-size", arg.substr(n) ); return; } AddOption( sect, "block-size", arg.substr(n, n2 - n) ); n = n2 + 1; AddOption( sect, "max-bytes", arg.substr(n) ); } //}}} // Export a list of the source Watches to enable. SecretSink::Options::List GetWatchOptions() const { //{{{ const Sections &s = GetSections("Watch:"); SecretSink::Options::List w; for( Sections::const_iterator i = s.begin(), e = s.end(); i != e; ++i ) { // Track which option we're applying, so that we // can report its name if an exception is thrown. std::string opt; try { SecretSink::Options sso; opt = "path"; if( i->second->HasOption( opt ) ) sso.devpath = i->second->GetOption( opt ); else throw Error( _("No path defined to Watch") ); opt = "delay"; if( i->second->HasOption( opt ) ) sso.block_delay = StrToScaledUL( i->second->GetOption( opt ) ); opt = "block-size"; if( i->second->HasOption( opt ) ) sso.block_size = StrToScaledUL( i->second->GetOption( opt ), 1024 ); opt = "max-bytes"; if( i->second->HasOption( opt ) ) sso.bytes = StrToScaledUL( i->second->GetOption( opt ), 1024 ); w.push_back( sso ); } catch( const std::exception &e ) { throw Error( _("Failed to apply [Watch:%s] option '%s': %s"), i->first.c_str(), opt.c_str(), e.what() ); } } return w; } //}}} // Specialisation of IniData::INIStr() to output the expected sections in // a logical (for users) and deterministic (if using a hashed map) order. // This string may be saved and later passed to ImportFile() or Decode() // to recreate the current configuration state. 
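    // In practice that means the output of --gen-conf can be captured and
    // handed straight back via -C, e.g. (file name arbitrary):
    //
    //     seedd -k -P 65536 --gen-conf > seedd.conf
    //     seedd -C seedd.conf
    //
    // should end up with the same effective configuration as the original
    // command line, assuming the leading '#' header lines are treated as
    // comments by the INI parser.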
std::string ConfigStr() const { //{{{ std::string out; Sections s = GetSections(), ss; Sections::iterator i = s.find("Service"), e; // Output the Service section if( i != s.end() ) { out.append( i->second->INIStr() + '\n' ); s.erase( i ); } // Output the Pool section i = s.find("Pool"); if( i != s.end() ) { out.append( i->second->INIStr() + '\n' ); s.erase( i ); } // Output the PoolGroup section(s) ss = GetSections("PoolGroup:"); for( i = ss.begin(), e = ss.end(); i != e; ++i ) { out.append( i->second->INIStr() + '\n' ); s.erase( i->second->GetName() ); } // Output the Devices section i = s.find("Devices"); if( i != s.end() ) { out.append( i->second->INIStr() + '\n' ); s.erase( i ); } // Output the Device section(s) ss = GetSections("Device:"); for( i = ss.begin(), e = ss.end(); i != e; ++i ) { out.append( i->second->INIStr() + '\n' ); s.erase( i->second->GetName() ); } // Output the Watch section(s) ss = GetSections("Watch:"); for( i = ss.begin(), e = ss.end(); i != e; ++i ) { out.append( i->second->INIStr() + '\n' ); s.erase( i->second->GetName() ); } // Output whatever else is still left for( i = s.begin(), e = s.end(); i != e; ++i ) out.append( i->second->INIStr() + '\n' ); return out; } //}}} }; //}}} int main( int argc, char *argv[] ) { try { Config conf; unsigned opt_scan = 0; size_t opt_bytes = 0; unsigned opt_stdout = 0; int opt_v = 0; bool opt_genconf = false; enum { SHELL_MR_OPT, FREEBIND_OPT, SOCKET_GROUP_OPT, KERNEL_DEVICE_OPT, KERNEL_REFILL_TIME_OPT, LATENCY_OPT, ENABLEMASK_OPT, IDLE_SLEEP_OPT, SUSPEND_AFTER_OPT, LOW_POWER_OPT, LIMIT_MAX_XFER, NOQA_OPT, WATCH_OPT, GENERATE_CONFIG_OPT, VERSION_OPT }; struct option long_options[] = { { "scan", no_argument, NULL, 's' }, { "shell-mr", no_argument, NULL, SHELL_MR_OPT }, { "config", required_argument, NULL, 'C' }, { "device-id", required_argument, NULL, 'i' }, { "bytes", required_argument, NULL, 'b' }, { "stdout", no_argument, NULL, 'o' }, { "daemon", no_argument, NULL, 'd' }, { "kernel", no_argument, NULL, 'k' }, { "ip-freebind", no_argument, NULL, FREEBIND_OPT }, { "udp-out", required_argument, NULL, 'u' }, { "control-socket", required_argument, NULL, 'c' }, { "socket-group", required_argument, NULL, SOCKET_GROUP_OPT }, { "pool-size", required_argument, NULL, 'P' }, { "kernel-device", required_argument, NULL, KERNEL_DEVICE_OPT }, { "kernel-refill", required_argument, NULL, KERNEL_REFILL_TIME_OPT }, { "group-size", required_argument, NULL, 'G' }, { "bitrate", required_argument, NULL, 'r' }, { "latency", required_argument, NULL, LATENCY_OPT }, { "fold", required_argument, NULL, 'f' }, { "group", required_argument, NULL, 'g' }, { "enable-mask", required_argument, NULL, ENABLEMASK_OPT }, { "idle-sleep", required_argument, NULL, IDLE_SLEEP_OPT }, { "suspend-after", required_argument, NULL, SUSPEND_AFTER_OPT }, { "low-power", no_argument, NULL, LOW_POWER_OPT }, { "limit-max-xfer", no_argument, NULL, LIMIT_MAX_XFER }, { "no-qa", no_argument, NULL, NOQA_OPT }, { "watch", required_argument, NULL, WATCH_OPT }, { "gen-conf", no_argument, NULL, GENERATE_CONFIG_OPT }, { "verbose", no_argument, NULL, 'v' }, { "help", no_argument, NULL, '?' 
}, { "version", no_argument, NULL, VERSION_OPT }, { 0, 0, 0, 0 } }; int opt_index = 0; for(;;) { //{{{ int c = getopt_long( argc, argv, ":sC:i:r:f:g:b:dku:oP:G:c:v?", long_options, &opt_index ); if( c == -1 ) break; switch(c) { case 's': opt_scan = 1; break; case SHELL_MR_OPT: opt_scan = 2; break; case 'C': conf.ImportFile( optarg ); break; case 'i': conf.AddDevice( optarg ); break; case 'b': opt_bytes = StrToScaledUL( optarg, 1024 ); break; case 'o': opt_stdout = 1; break; case 'd': conf.AddOrUpdateOption( "Service", "daemon" ); break; case 'k': conf.AddOrUpdateOption( "Service", "kernel" ); break; case FREEBIND_OPT: conf.AddOrUpdateOption( "Service", "ip-freebind" ); break; case 'u': conf.AddOrUpdateOption( "Service", "udp-out", optarg ); break; case 'c': conf.AddOrUpdateOption( "Service", "control-socket", optarg ); break; case SOCKET_GROUP_OPT: conf.AddOrUpdateOption( "Service", "socket-group", optarg ); break; case 'P': conf.AddOrUpdateOption( "Pool", "size", optarg ); break; case KERNEL_DEVICE_OPT: conf.AddOrUpdateOption( "Pool", "kernel-device", optarg ); break; case KERNEL_REFILL_TIME_OPT: conf.AddOrUpdateOption( "Pool", "kernel-refill", optarg ); break; case 'G': { std::string s( optarg ); conf.AddOrUpdateOption( "PoolGroup:" + beforefirst(':', s), "size", afterfirst(':', s) ); break; } case 'r': conf.SetDeviceOption( "bitrate", optarg ); break; case LATENCY_OPT: conf.SetDeviceOption( "latency", optarg ); break; case 'f': conf.SetDeviceOption( "fold", optarg ); break; case 'g': conf.SetDeviceOption( "group", optarg ); break; case ENABLEMASK_OPT: conf.SetDeviceOption( "enable-mask", optarg ); break; case IDLE_SLEEP_OPT: conf.SetDeviceOption( "idle-sleep", optarg ); break; case SUSPEND_AFTER_OPT: conf.SetDeviceOption( "suspend-after", optarg ); break; case LOW_POWER_OPT: conf.SetDeviceOption( "low-power" ); break; case LIMIT_MAX_XFER: conf.SetDeviceOption( "limit-max-xfer" ); break; case NOQA_OPT: conf.SetDeviceOption( "no-qa" ); break; case WATCH_OPT: conf.AddWatch( optarg ); break; case GENERATE_CONFIG_OPT: opt_genconf = true; break; case 'v': ++opt_v; break; case '?': if( optopt != '?' && optopt != 0 ) { fprintf(stderr, "%s: invalid option -- '%c', try --help\n", argv[0], optopt); return EXIT_FAILURE; } // If we're generating a config, don't dump the usage to stdout // under any circumstances and do return an EXIT_FAILURE code. if( opt_genconf ) { fprintf(stderr, "%s: invalid option used, not generating config\n", argv[0]); return EXIT_FAILURE; } usage(); return EXIT_SUCCESS; case ':': fprintf(stderr, "%s: missing argument for '%s', try --help\n", argv[0], argv[optind - 1] ); return EXIT_FAILURE; case VERSION_OPT: printf("seedd " PACKAGE_VERSION "\n"); return EXIT_SUCCESS; } } //}}} std::string notify_socket = BitB::GetSystemdNotifySocket(); // If we've been started by systemd in notify mode we need to stay in the // foreground regardless of what config options we may have been passed. if( ! notify_socket.empty() ) conf.RemoveOption( "Service", "daemon" ); // Just output a configuration file (based on the options passed) and exit. if( opt_genconf ) { //{{{ std::string cmd_line; for( int i = 0; i < argc; ++i ) cmd_line.append( stringprintf(" %s", argv[i]) ); // We don't usually push the -v command line override into the config // but do it here, because we want that in the generated config if used. 
if( opt_v ) conf.AddOrUpdateOption( "Service", "verbose", stringprintf("%d", opt_v) ); printf( "# Generated configuration file for seedd(1), created %s using:\n" "# %s\n%s\n", BitB::timeprintf( "%F", BitB::GetWallTimeval() ).c_str(), cmd_line.c_str(), conf.ConfigStr().c_str() ); return EXIT_SUCCESS; } //}}} // Pump up the volume (if asked to) if( opt_v ) BitB::opt_verbose = opt_v; else if ( conf.HasOption("Service", "verbose") ) BitB::opt_verbose = int(StrToU( conf.GetOption("Service", "verbose") )); // And send it to syslog if we'll be running in the background. if( conf.HasOption("Service", "daemon") && ! opt_scan ) BitB::SendLogsToSyslog( argv[0] ); if( ! notify_socket.empty() ) Log<4>( "NOTIFY_SOCKET='%s'\n", notify_socket.c_str() ); Log<2>( "Using configuration:\n%s", conf.ConfigStr().c_str() ); // Extract and (initially) sanity check these before going to // the background if we're going to be running this as a daemon. Pool::Options pool_options = conf.GetPoolOptions(); Pool::Group::Options::List group_options = conf.GetPoolGroupOptions(); SecretSink::Options::List watch_options = conf.GetWatchOptions(); BitBabbler::Options default_options = conf.GetDefaultDeviceOptions(); BitBabbler::Options::List device_options = conf.GetDeviceOptions(); #if EM_PLATFORM_POSIX if( conf.HasOption("Service", "daemon") && ! opt_scan ) { if( daemon(0,0) ) throw SystemError( _("Failed to fork daemon") ); umask( S_IWGRP | S_IROTH | S_IWOTH | S_IXOTH ); } BitB::BlockSignals(); #else // We could implement support for this if/when needed, but it's less useful // on systems where we don't sit in the background feeding the OS kernel. if( conf.HasOption("Service", "daemon") && ! opt_scan ) throw Error( _("Daemon mode not supported on this platform.") ); #endif BitB::Devices d; if( opt_scan ) { switch( opt_scan ) { case 1: d.ListDevices(); return EXIT_SUCCESS; case 2: d.ListDevicesShellMR(); return EXIT_SUCCESS; } fprintf(stderr, "seedd: unknown device scan option %u\n", opt_scan ); return EXIT_FAILURE; } else if( d.GetNumDevices() == 0 && ! d.HasHotplugSupport() ) { // If we don't have hotplug support, and we don't have any devices now, // then there's no point waiting around, because none will appear later. fprintf( stderr, _("seedd: No devices found, and no hotplug support. Aborting.\n") ); return EXIT_FAILURE; } pthread_t main_thread = pthread_self(); Pool::Handle pool = new Pool( pool_options ); for( Pool::Group::Options::List::iterator i = group_options.begin(), e = group_options.end(); i != e; ++i ) pool->AddGroup( i->groupid, i->size ); d.AddDevicesToPool( pool, default_options, device_options ); SocketSource::Handle ssrc; if( conf.HasOption("Service", "udp-out") ) { opt_bytes = 0; ssrc = new SocketSource( pool, conf.GetOption("Service", "udp-out"), conf.HasOption("Service", "ip-freebind") ); } if( conf.HasOption("Service", "kernel") ) { opt_bytes = 0; pool->FeedKernelEntropyAsync(); } if( opt_stdout || opt_bytes ) { if( opt_bytes && ! 
conf.HasOption("Service", "control-socket") ) conf.AddOrUpdateOption( "Service", "control-socket", "none" ); #if EM_PLATFORM_MSW setmode( STDOUT_FILENO, O_BINARY ); #endif pool->WriteToFDAsync( STDOUT_FILENO, opt_bytes, WriteCompletion, &main_thread ); } SecretSink::List watch_sinks; for( SecretSink::Options::List::iterator i = watch_options.begin(), e = watch_options.end(); i != e; ++i ) watch_sinks.push_back( new SecretSink( *i ) ); ControlSock::Handle ctl = CreateControlSocket( conf.GetOption( "Service", "control-socket", SEEDD_CONTROL_SOCKET ), conf.GetOption( "Service", "socket-group", std::string() ), conf.HasOption( "Service", "ip-freebind" ) ); // If we've been started by systemd in notify mode, then notify it ... if( ! notify_socket.empty() ) BitB::SystemdNotify( "READY=1", notify_socket ); #if EM_PLATFORM_POSIX int sig; wait_for_the_signal: sig = BitB::SigWait( SIGINT, SIGQUIT, SIGTERM, SIGABRT, SIGTSTP, SIGRTMIN, SIGUSR1 ); switch( sig ) { case SIGTSTP: Log<0>( _("Stopped by signal %d (%s)\n"), sig, strsignal(sig) ); raise( SIGSTOP ); goto wait_for_the_signal; case SIGUSR1: // p.ReportState() goto wait_for_the_signal; default: // We can't switch on SIGRTMIN, it's not a constant. if( sig == SIGRTMIN ) Log<1>( _("Wrote %zu bytes to stdout\n"), opt_bytes ); else Log<0>( _("Terminated by signal %d (%s)\n"), sig, strsignal(sig) ); break; } #else BitB::ScopedMutex lock( &wait_mutex ); while( ! done_waiting ) pthread_cond_wait( &wait_cond, &wait_mutex ); #endif // If we've been started by systemd in notify mode, humour it again ... //{{{ // This is mostly useless, but not entirely, because exiting this scope // isn't the end of us yet, there's a bunch of shutdown still to be done // including terminating threads in the unwinding which happens next. // This really is still just the beginning of the end, some pathological // worst case could keep us hanging on for longer than we expected to. //}}} if( ! notify_socket.empty() ) BitB::SystemdNotify( "STOPPING=1", notify_socket ); return EXIT_SUCCESS; } BB_CATCH_ALL( 0, _("seedd fatal exception") ) return EXIT_FAILURE; } // vi:sts=4:sw=4:et:foldmethod=marker